Merge branches 'at91', 'dcache', 'ftrace', 'hwbpt', 'misc', 'mmci', 's3c', 'st-ux' and 'unwind' into devel
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index ecd35e9..feca075 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -46,7 +46,6 @@
<sect1><title>Atomic and pointer manipulation</title>
!Iarch/x86/include/asm/atomic.h
-!Iarch/x86/include/asm/unaligned.h
</sect1>
<sect1><title>Delaying, scheduling, and timer routines</title>
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a20c6f6..6899f47 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -57,7 +57,6 @@
</para>
<sect1><title>String Conversions</title>
-!Ilib/vsprintf.c
!Elib/vsprintf.c
</sect1>
<sect1><title>String Manipulation</title>
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 0b1a3f9..a0d479d 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1961,6 +1961,12 @@
</sect1>
</chapter>
+ <chapter id="apiref">
+ <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+ </chapter>
+
<chapter id="references">
<title>Further reading</title>
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
new file mode 100644
index 0000000..e578fee
--- /dev/null
+++ b/Documentation/block/cfq-iosched.txt
@@ -0,0 +1,45 @@
+CFQ ioscheduler tunables
+========================
+
+slice_idle
+----------
+This specifies how long CFQ should idle, waiting for the next request, on certain
+cfq queues (for sequential workloads) and service trees (for random workloads)
+before the queue is expired and CFQ selects the next queue to dispatch from.
+
+By default slice_idle is a non-zero value. That means by default we idle on
+queues/service trees. This can be very helpful on highly seeky media like
+single-spindle SATA/SAS disks, where we can cut down on the overall number of
+seeks and see improved throughput.
+
+Setting slice_idle to 0 will remove all idling at the queue/service tree
+level, and one should see overall improved throughput on faster storage
+devices like multiple SATA/SAS disks in a hardware RAID configuration. The
+downside is that the isolation provided from WRITES also goes down and the
+notion of IO priority becomes weaker.
+
+So depending on storage and workload, it might be useful to set slice_idle=0.
+In general I think for SATA/SAS disks and software RAID of SATA/SAS disks,
+keeping slice_idle enabled should be useful. For any configuration where
+there are multiple spindles behind a single LUN (host-based hardware RAID
+controller or storage arrays), setting slice_idle=0 might result in better
+throughput and acceptable latencies.
+
+CFQ IOPS Mode for group scheduling
+===================================
+The basic CFQ design is to provide priority-based time slices. A higher
+priority process gets a bigger time slice and a lower priority process gets a
+smaller time slice. Measuring time becomes harder if the storage is fast and
+supports NCQ, and it would be better to dispatch multiple requests from
+multiple cfq queues in the request queue at a time. In such a scenario, it is
+not possible to accurately measure the time consumed by a single queue.
+
+What is possible, though, is to measure the number of requests dispatched
+from a single queue and also allow dispatch from multiple cfq queues at the
+same time.
+This effectively becomes the fairness in terms of IOPS (IO operations per
+second).
+
+If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
+to IOPS mode and starts providing fairness in terms of number of requests
+dispatched. Note that this mode switching takes effect only for group
+scheduling. For non-cgroup users nothing should change.
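For illustration, a user-space sketch along these lines could flip the tunable
described above through sysfs; the device name (sda) and the value 0 are
assumptions made purely for the example, not part of the patch.

	/* Sketch: set slice_idle to 0 for /dev/sda so that CFQ stops idling
	 * and, on NCQ-capable storage, switches to IOPS mode for groups. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/block/sda/queue/iosched/slice_idle", "w");

		if (!f) {
			perror("slice_idle");
			return 1;
		}
		fprintf(f, "0\n");
		fclose(f);
		return 0;
	}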
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 48e0b21..6919d62 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -217,6 +217,7 @@
CFQ sysfs tunable
=================
/sys/block/<disk>/queue/iosched/group_isolation
+-----------------------------------------------
If group_isolation=1, it provides stronger isolation between groups at the
expense of throughput. By default group_isolation is 0. In general that
@@ -243,6 +244,33 @@
and one wants stronger isolation between groups, then set group_isolation=1
but this will come at cost of reduced throughput.
+/sys/block/<disk>/queue/iosched/slice_idle
+------------------------------------------
+On faster hardware CFQ can be slow, especially with sequential workloads.
+This happens because CFQ idles on a single queue, and a single queue might not
+drive deep enough request queue depths to keep the storage busy. In such
+scenarios one can try setting slice_idle=0; that switches CFQ to IOPS
+(IO operations per second) mode on NCQ-supporting hardware.
+
+That means CFQ will not idle between cfq queues of a cfq group and hence will
+be able to drive higher queue depths and achieve better throughput. It also
+means that cfq provides fairness among groups in terms of IOPS and not in
+terms of disk time.
+
+/sys/block/<disk>/queue/iosched/group_idle
+------------------------------------------
+If one disables idling on individual cfq queues and cfq service trees by
+setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
+on the group in an attempt to provide fairness among groups.
+
+By default group_idle is the same as slice_idle and does not do anything if
+slice_idle is enabled.
+
+One can experience an overall throughput drop if one has created multiple
+groups and put applications in those groups which are not driving enough
+IO to keep the disk busy. In that case set group_idle=0, and CFQ will not idle
+on individual groups, and throughput should improve.
+
What works
==========
 - Currently only sync IO queues are supported. All the buffered writes are
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index d96a6db..9633da0 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -109,17 +109,19 @@
If you want to initialize a structure with an invalid GPIO number, use
some negative number (perhaps "-EINVAL"); that will never be valid. To
-test if a number could reference a GPIO, you may use this predicate:
+test if such a number from such a structure could reference a GPIO, you
+may use this predicate:
int gpio_is_valid(int number);
A number that's not valid will be rejected by calls which may request
or free GPIOs (see below). Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
+example, a number might be valid but temporarily unused on a given board.
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
-
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the space
+of GPIO numbers, and whether new controllers can be added at runtime. Such issues
+can affect things including whether adjacent GPIO numbers are both valid.
Using GPIOs
-----------
@@ -480,12 +482,16 @@
ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space. (It should count both built-in/SoC GPIOs and
+those on GPIO expanders.)
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
into the kernel on that architecture.
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
can enable it and build it into the kernel optionally.
If neither of these options are selected, the platform does not support
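For illustration, a driver might combine the gpio_is_valid() predicate
described above with gpio_request() roughly as sketched below; the helper name
and the "example" label are made up for this example.

	#include <linux/gpio.h>
	#include <linux/errno.h>

	/* 'gpio' may come from platform data and can legitimately be a
	 * negative "no GPIO wired up" placeholder such as -EINVAL. */
	static int example_claim_gpio(int gpio)
	{
		if (!gpio_is_valid(gpio))
			return -ENODEV;	/* nothing to request on this board */

		return gpio_request(gpio, "example");
	}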
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index ff45d1f..48ceabe 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -91,12 +91,11 @@
I2C devices get this attribute created automatically.
RO
-update_rate The rate at which the chip will update readings.
+update_interval The interval at which the chip will update readings.
Unit: millisecond
RW
- Some devices have a variable update rate. This attribute
- can be used to change the update rate to the desired
- frequency.
+ Some devices have a variable update rate or interval.
+ This attribute can be used to change it to the desired value.
************
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 27a52b3..3d8a977 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -345,5 +345,10 @@
section titled <section title> from <filename>.
Spaces are allowed in <section title>; do not quote the <section title>.
+!C<filename> is replaced by nothing, but makes the tools check that
+all DOC: sections and documented functions, symbols, etc. are used.
+This makes sense to use when you use only !F/!P and want to verify
+that all documentation is included.
+
Tim.
*/ <twaugh@redhat.com>
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
index c91ccc0..38c10fd 100644
--- a/Documentation/mutex-design.txt
+++ b/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@
mutex semantics are sufficient for your code, then there are a couple
of advantages of mutexes:
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
A smaller structure size means less RAM footprint, and better
CPU-cache utilization.
@@ -136,3 +136,4 @@
void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
int mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
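As a rough sketch of how the newly listed helper is typically used (the object
names below are invented for the example), atomic_dec_and_mutex_lock() drops a
reference and returns with the mutex held only when the count reaches zero:

	#include <linux/mutex.h>
	#include <asm/atomic.h>

	static DEFINE_MUTEX(example_lock);
	static atomic_t example_refcount = ATOMIC_INIT(1);

	static void example_put(void)
	{
		/* Returns true, with example_lock held, only if the count
		 * dropped to zero; otherwise it just decrements the count. */
		if (atomic_dec_and_mutex_lock(&example_refcount, &example_lock)) {
			/* last reference gone: tear down while holding the lock */
			mutex_unlock(&example_lock);
		}
	}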
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt
index 9363e05..8ed1758 100644
--- a/Documentation/power/regulator/overview.txt
+++ b/Documentation/power/regulator/overview.txt
@@ -13,7 +13,7 @@
current limit is controllable).
(C) 2008 Wolfson Microelectronics PLC.
-Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Author: Liam Girdwood <lrg@slimlogic.co.uk>
Nomenclature
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index ce46fa1..37c6aad 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,6 +296,7 @@
Conexant 5066
=============
laptop Basic Laptop config (default)
+  hp-laptop	HP laptops, e.g. G60
dell-laptop Dell laptops
dell-vostro Dell Vostro
olpc-xo-1_5 OLPC XO 1.5
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
new file mode 100644
index 0000000..e4498a2
--- /dev/null
+++ b/Documentation/workqueue.txt
@@ -0,0 +1,380 @@
+
+Concurrency Managed Workqueue (cmwq)
+
+September, 2010 Tejun Heo <tj@kernel.org>
+ Florian Mickler <florian@mickler.org>
+
+CONTENTS
+
+1. Introduction
+2. Why cmwq?
+3. The Design
+4. Application Programming Interface (API)
+5. Example Execution Scenarios
+6. Guidelines
+
+
+1. Introduction
+
+There are many cases where an asynchronous process execution context
+is needed and the workqueue (wq) API is the most commonly used
+mechanism for such cases.
+
+When such an asynchronous execution context is needed, a work item
+describing which function to execute is put on a queue. An
+independent thread serves as the asynchronous execution context. The
+queue is called a workqueue and the thread is called a worker.
+
+While there are work items on the workqueue, the worker executes the
+functions associated with the work items one after the other. When
+there is no work item left on the workqueue, the worker becomes idle.
+When a new work item gets queued, the worker begins executing again.
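To make the model above concrete, a minimal user of the existing API looks
roughly like the sketch below; the function and work item names are
placeholders chosen for the example.

	#include <linux/workqueue.h>

	static void example_fn(struct work_struct *work)
	{
		/* executed asynchronously in a worker's context */
	}

	static DECLARE_WORK(example_work, example_fn);

	static void example_kick(void)
	{
		schedule_work(&example_work);	/* queue on the default workqueue */
	}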
+
+
+2. Why cmwq?
+
+In the original wq implementation, a multi-threaded (MT) wq had one
+worker thread per CPU and a single-threaded (ST) wq had one worker
+thread system-wide. A single MT wq needed to keep around the same
+number of workers as the number of CPUs. The kernel grew a lot of MT
+wq users over the years and with the number of CPU cores continuously
+rising, some systems saturated the default 32k PID space just booting
+up.
+
+Although MT wq wasted a lot of resources, the level of concurrency
+provided was unsatisfactory. The limitation was common to both ST and
+MT wq, albeit less severe on MT. Each wq maintained its own separate
+worker pool. An MT wq could provide only one execution context per CPU
+while an ST wq provided one for the whole system. Work items had to
+compete for those very limited execution contexts, leading to various
+problems including proneness to deadlocks around the single execution context.
+
+The tension between the provided level of concurrency and resource
+usage also forced its users to make unnecessary tradeoffs, like libata
+choosing to use an ST wq for polling PIOs and accepting the unnecessary
+limitation that no two polling PIOs can progress at the same time. As
+MT wq don't provide much better concurrency, users which require a
+higher level of concurrency, like async or fscache, had to implement
+their own thread pools.
+
+Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
+focus on the following goals.
+
+* Maintain compatibility with the original workqueue API.
+
+* Use per-CPU unified worker pools shared by all wq to provide a
+  flexible level of concurrency on demand without wasting a lot of
+  resources.
+
+* Automatically regulate worker pool and level of concurrency so that
+ the API users don't need to worry about such details.
+
+
+3. The Design
+
+In order to ease the asynchronous execution of functions, a new
+abstraction, the work item, is introduced.
+
+A work item is a simple struct that holds a pointer to the function
+that is to be executed asynchronously. Whenever a driver or subsystem
+wants a function to be executed asynchronously it has to set up a work
+item pointing to that function and queue that work item on a
+workqueue.
+
+Special purpose threads, called worker threads, execute the functions
+off of the queue, one after the other. If no work is queued, the
+worker threads become idle. These worker threads are managed in
+so-called thread-pools.
+
+The cmwq design differentiates between the user-facing workqueues that
+subsystems and drivers queue work items on and the backend mechanism
+which manages thread-pool and processes the queued work items.
+
+The backend is called gcwq. There is one gcwq for each possible CPU
+and one gcwq to serve work items queued on unbound workqueues.
+
+Subsystems and drivers can create and queue work items through special
+workqueue API functions as they see fit. They can influence some
+aspects of the way the work items are executed by setting flags on the
+workqueue they are putting the work item on. These flags include
+things like CPU locality, reentrancy, concurrency limits and more. To
+get a detailed overview refer to the API description of
+alloc_workqueue() below.
+
+When a work item is queued to a workqueue, the target gcwq is
+determined according to the queue parameters and workqueue attributes,
+and the work item is appended to the shared worklist of that gcwq. For
+example, unless specifically overridden, a work item of a bound
+workqueue will be queued on the worklist of exactly the gcwq that is
+associated with the CPU the issuer is running on.
+
+For any worker pool implementation, managing the concurrency level
+(how many execution contexts are active) is an important issue. cmwq
+tries to keep the concurrency at a minimal but sufficient level.
+Minimal to save resources and sufficient in that the system is used at
+its full capacity.
+
+Each gcwq bound to an actual CPU implements concurrency management by
+hooking into the scheduler. The gcwq is notified whenever an active
+worker wakes up or sleeps and keeps track of the number of currently
+runnable workers. Generally, work items are not expected to
+hog a CPU and consume many cycles. That means maintaining just enough
+concurrency to prevent work processing from stalling should be
+optimal. As long as there are one or more runnable workers on the
+CPU, the gcwq doesn't start execution of a new work, but, when the
+last running worker goes to sleep, it immediately schedules a new
+worker so that the CPU doesn't sit idle while there are pending work
+items. This allows using a minimal number of workers without losing
+execution bandwidth.
+
+Keeping idle workers around doesn't cost anything other than the memory
+space for the kthreads, so cmwq holds onto idle ones for a while before
+killing them.
+
+For an unbound wq, the above concurrency management doesn't apply and
+the gcwq for the pseudo unbound CPU tries to start executing all work
+items as soon as possible. The responsibility of regulating
+concurrency level is on the users. There is also a flag to mark a
+bound wq to ignore the concurrency management. Please refer to the
+API section for details.
+
+The forward progress guarantee relies on the fact that workers can be
+created when more execution contexts are necessary, which in turn is
+guaranteed through the use of rescue workers. All work items which might
+be used on code paths that handle memory reclaim are required to be
+queued on wq's that have a rescue-worker reserved for execution under
+memory pressure. Otherwise it is possible that the thread-pool
+deadlocks, waiting for execution contexts to free up.
+
+
+4. Application Programming Interface (API)
+
+alloc_workqueue() allocates a wq. The original create_*workqueue()
+functions are deprecated and scheduled for removal. alloc_workqueue()
+takes three arguments - @name, @flags and @max_active. @name is the
+name of the wq and is also used as the name of the rescuer thread if
+there is one.
+
+A wq no longer manages execution resources but serves as a domain for
+forward progress guarantee, flush and work item attributes. @flags
+and @max_active control how work items are assigned execution
+resources, scheduled and executed.
+
+@flags:
+
+ WQ_NON_REENTRANT
+
+ By default, a wq guarantees non-reentrance only on the same
+ CPU. A work item may not be executed concurrently on the same
+ CPU by multiple workers but is allowed to be executed
+ concurrently on multiple CPUs. This flag makes sure
+ non-reentrance is enforced across all CPUs. Work items queued
+ to a non-reentrant wq are guaranteed to be executed by at most
+ one worker system-wide at any given time.
+
+ WQ_UNBOUND
+
+ Work items queued to an unbound wq are served by a special
+ gcwq which hosts workers which are not bound to any specific
+ CPU. This makes the wq behave as a simple execution context
+ provider without concurrency management. The unbound gcwq
+ tries to start execution of work items as soon as possible.
+ Unbound wq sacrifices locality but is useful for the following
+ cases.
+
+ * Wide fluctuation in the concurrency level requirement is
+ expected and using bound wq may end up creating large number
+ of mostly unused workers across different CPUs as the issuer
+ hops through different CPUs.
+
+ * Long running CPU intensive workloads which can be better
+ managed by the system scheduler.
+
+ WQ_FREEZEABLE
+
+ A freezeable wq participates in the freeze phase of the system
+ suspend operations. Work items on the wq are drained and no
+ new work item starts execution until thawed.
+
+ WQ_RESCUER
+
+ All wq which might be used in the memory reclaim paths _MUST_
+ have this flag set. This reserves one worker exclusively for
+ the execution of this wq under memory pressure.
+
+ WQ_HIGHPRI
+
+ Work items of a highpri wq are queued at the head of the
+ worklist of the target gcwq and start execution regardless of
+ the current concurrency level. In other words, highpri work
+ items will always start execution as soon as execution
+ resource is available.
+
+ Ordering among highpri work items is preserved - a highpri
+ work item queued after another highpri work item will start
+ execution after the earlier highpri work item starts.
+
+ Although highpri work items are not held back by other
+ runnable work items, they still contribute to the concurrency
+ level. Highpri work items in runnable state will prevent
+ non-highpri work items from starting execution.
+
+ This flag is meaningless for unbound wq.
+
+ WQ_CPU_INTENSIVE
+
+ Work items of a CPU intensive wq do not contribute to the
+ concurrency level. In other words, runnable CPU intensive
+ work items will not prevent other work items from starting
+ execution. This is useful for bound work items which are
+ expected to hog CPU cycles so that their execution is
+ regulated by the system scheduler.
+
+ Although CPU intensive work items don't contribute to the
+ concurrency level, start of their executions is still
+ regulated by the concurrency management and runnable
+ non-CPU-intensive work items can delay execution of CPU
+ intensive work items.
+
+ This flag is meaningless for unbound wq.
+
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE
+
+ This combination makes the wq avoid interaction with
+ concurrency management completely and behave as a simple
+ per-CPU execution context provider. Work items queued on a
+ highpri CPU-intensive wq start execution as soon as resources
+ are available and don't affect execution of other work items.
+
+@max_active:
+
+@max_active determines the maximum number of execution contexts per
+CPU which can be assigned to the work items of a wq. For example,
+with @max_active of 16, at most 16 work items of the wq can be
+executing at the same time per CPU.
+
+Currently, for a bound wq, the maximum limit for @max_active is 512
+and the default value used when 0 is specified is 256. For an unbound
+wq, the limit is the higher of 512 and 4 * num_possible_cpus(). These
+values are chosen sufficiently high such that they are not the
+limiting factor while providing protection in runaway cases.
+
+The number of active work items of a wq is usually regulated by the
+users of the wq, more specifically, by how many work items the users
+may queue at the same time. Unless there is a specific need for
+throttling the number of active work items, specifying '0' is
+recommended.
+
+Some users depend on the strict execution ordering of ST wq. The
+combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
+behavior. Work items on such wq are always queued to the unbound gcwq
+and only one work item can be active at any given time thus achieving
+the same ordering property as ST wq.
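Putting the pieces of this section together, a caller might allocate its
workqueues roughly as sketched below; the names, flags and limits are
illustrative assumptions only, not taken from the patch.

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;
	static struct workqueue_struct *example_ordered_wq;

	static int __init example_init(void)
	{
		/* used on memory reclaim paths, so a rescuer is mandatory;
		 * 0 picks the default @max_active */
		example_wq = alloc_workqueue("example", WQ_RESCUER, 0);

		/* strict ST-style ordering: unbound with @max_active == 1 */
		example_ordered_wq = alloc_workqueue("example_ordered",
						     WQ_UNBOUND, 1);

		if (!example_wq || !example_ordered_wq) {
			if (example_wq)
				destroy_workqueue(example_wq);
			if (example_ordered_wq)
				destroy_workqueue(example_ordered_wq);
			return -ENOMEM;
		}
		return 0;
	}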
+
+
+5. Example Execution Scenarios
+
+The following example execution scenarios try to illustrate how cmwq
+behaves under different configurations.
+
+ Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
+ w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
+ again before finishing. w1 and w2 burn CPU for 5ms then sleep for
+ 10ms.
+
+Ignoring all other tasks, works and processing overhead, and assuming
+simple FIFO scheduling, the following is one highly simplified version
+of possible sequences of events with the original wq.
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 starts and burns CPU
+ 25 w1 sleeps
+ 35 w1 wakes up and finishes
+ 35 w2 starts and burns CPU
+ 40 w2 sleeps
+ 50 w2 wakes up and finishes
+
+And with cmwq with @max_active >= 3,
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 starts and burns CPU
+ 10 w1 sleeps
+ 10 w2 starts and burns CPU
+ 15 w2 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 25 w2 wakes up and finishes
+
+If @max_active == 2,
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 starts and burns CPU
+ 10 w1 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 20 w2 starts and burns CPU
+ 25 w2 sleeps
+ 35 w2 wakes up and finishes
+
+Now, let's assume w1 and w2 are queued to a different wq q1 which has
+WQ_HIGHPRI set,
+
+ TIME IN MSECS EVENT
+ 0 w1 and w2 start and burn CPU
+ 5 w1 sleeps
+ 10 w2 sleeps
+ 10 w0 starts and burns CPU
+ 15 w0 sleeps
+ 15 w1 wakes up and finishes
+ 20 w2 wakes up and finishes
+ 25 w0 wakes up and burns CPU
+ 30 w0 finishes
+
+If q1 has WQ_CPU_INTENSIVE set,
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 and w2 start and burn CPU
+ 10 w1 sleeps
+ 15 w2 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 25 w2 wakes up and finishes
+
+
+6. Guidelines
+
+* Do not forget to use WQ_RESCUER if a wq may process work items which
+ are used during memory reclaim. Each wq with WQ_RESCUER set has one
+  rescuer thread reserved for it. If there is a dependency among
+  multiple work items used during memory reclaim, they should be
+  queued to separate wq's, each with WQ_RESCUER.
+
+* Unless strict ordering is required, there is no need to use ST wq.
+
+* Unless there is a specific need, using 0 for @max_active is
+ recommended. In most use cases, concurrency level usually stays
+ well under the default limit.
+
+* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
+ flush and work item attributes. Work items which are not involved
+ in memory reclaim and don't need to be flushed as a part of a group
+ of work items, and don't require any special attribute, can use one
+ of the system wq. There is no difference in execution
+ characteristics between using a dedicated wq and a system wq.
+
+* Unless work items are expected to consume a huge amount of CPU
+ cycles, using a bound wq is usually beneficial due to the increased
+ level of locality in wq operations and work item execution.
diff --git a/MAINTAINERS b/MAINTAINERS
index 087912a..668682d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1135,7 +1135,7 @@
M: Jay Cliburn <jcliburn@gmail.com>
M: Chris Snook <chris.snook@gmail.com>
M: Jie Yang <jie.yang@atheros.com>
-L: atl1-devel@lists.sourceforge.net
+L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/atl1
W: http://atl1.sourceforge.net
S: Maintained
@@ -1220,7 +1220,7 @@
F: include/linux/cfag12864b.h
AVR32 ARCHITECTURE
-M: Haavard Skinnemoen <hskinnemoen@atmel.com>
+M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
W: http://www.atmel.com/products/AVR32/
W: http://avr32linux.org/
W: http://avrfreaks.net/
@@ -1228,7 +1228,7 @@
F: arch/avr32/
AVR32/AT32AP MACHINE SUPPORT
-M: Haavard Skinnemoen <hskinnemoen@atmel.com>
+M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
S: Supported
F: arch/avr32/mach-at32ap/
@@ -1445,6 +1445,16 @@
F: Documentation/video4linux/cafe_ccic
F: drivers/media/video/cafe_ccic*
+CAIF NETWORK LAYER
+M: Sjur Braendeland <sjur.brandeland@stericsson.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/networking/caif/
+F: drivers/net/caif/
+F: include/linux/caif/
+F: include/net/caif/
+F: net/caif/
+
CALGARY x86-64 IOMMU
M: Muli Ben-Yehuda <muli@il.ibm.com>
M: "Jon D. Mason" <jdmason@kudzu.us>
@@ -2189,6 +2199,12 @@
S: Maintained
F: drivers/platform/x86/eeepc-laptop.c
+EFIFB FRAMEBUFFER DRIVER
+L: linux-fbdev@vger.kernel.org
+M: Peter Jones <pjones@redhat.com>
+S: Maintained
+F: drivers/video/efifb.c
+
EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
@@ -2647,9 +2663,14 @@
F: drivers/media/video/gspca/
HARDWARE MONITORING
+M: Jean Delvare <khali@linux-fr.org>
+M: Guenter Roeck <guenter.roeck@ericsson.com>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
-S: Orphan
+T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+S: Maintained
F: Documentation/hwmon/
F: drivers/hwmon/
F: include/linux/hwmon*.h
@@ -2787,11 +2808,6 @@
F: arch/x86/kernel/hpet.c
F: arch/x86/include/asm/hpet.h
-HPET: ACPI
-M: Bob Picco <bob.picco@hp.com>
-S: Maintained
-F: drivers/char/hpet.c
-
HPFS FILESYSTEM
M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3404,7 +3420,7 @@
KEXEC
M: Eric Biederman <ebiederm@xmission.com>
-W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+W: http://kernel.org/pub/linux/utils/kernel/kexec/
L: kexec@lists.infradead.org
S: Maintained
F: include/linux/kexec.h
@@ -3891,10 +3907,8 @@
F: drivers/char/mxser.*
MSI LAPTOP SUPPORT
-M: Lennart Poettering <mzxreary@0pointer.de>
+M: Lee, Chun-Yi <jlee@novell.com>
L: platform-driver-x86@vger.kernel.org
-W: https://tango.0pointer.de/mailman/listinfo/s270-linux
-W: http://0pointer.de/lennart/tchibo.html
S: Maintained
F: drivers/platform/x86/msi-laptop.c
@@ -3911,8 +3925,10 @@
F: drivers/mfd/
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
-S: Orphan
+M: Chris Ball <cjb@laptop.org>
L: linux-mmc@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S: Maintained
F: drivers/mmc/
F: include/linux/mmc/
@@ -3934,7 +3950,7 @@
F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M: Felipe Balbi <felipe.balbi@nokia.com>
+M: Felipe Balbi <balbi@ti.com>
L: linux-usb@vger.kernel.org
T: git git://gitorious.org/usb/usb.git
S: Maintained
@@ -4232,7 +4248,7 @@
F: drivers/char/hw_random/omap-rng.c
OMAP USB SUPPORT
-M: Felipe Balbi <felipe.balbi@nokia.com>
+M: Felipe Balbi <balbi@ti.com>
M: David Brownell <dbrownell@users.sourceforge.net>
L: linux-usb@vger.kernel.org
L: linux-omap@vger.kernel.org
@@ -4810,6 +4826,7 @@
M: Josh Triplett <josh@freedesktop.org>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
S: Supported
+T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcutorture.c
@@ -4834,6 +4851,7 @@
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
W: http://www.rdrop.com/users/paulmck/rclock/
S: Supported
+T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/
F: include/linux/rcu*
F: include/linux/srcu*
@@ -4841,12 +4859,10 @@
F: kernel/srcu*
X: kernel/rcutorture.c
-REAL TIME CLOCK DRIVER
+REAL TIME CLOCK DRIVER (LEGACY)
M: Paul Gortmaker <p_gortmaker@yahoo.com>
S: Maintained
-F: Documentation/rtc.txt
-F: drivers/rtc/
-F: include/linux/rtc.h
+F: drivers/char/rtc.c
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
@@ -5083,8 +5099,10 @@
F: drivers/mmc/host/sdricoh_cs.c
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-S: Orphan
+M: Chris Ball <cjb@laptop.org>
L: linux-mmc@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S: Maintained
F: drivers/mmc/host/sdhci.*
SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
diff --git a/Makefile b/Makefile
index 4df9873..cf7fcb3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 36
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
NAME = Sheep on Meth
# *DOCUMENTATION*
@@ -554,8 +554,15 @@
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
+# Some targets (ARM with Thumb2, for example) can't be built with frame
+# pointers. For those, we don't have FUNCTION_TRACER automatically
+# select FRAME_POINTER. However, FUNCTION_TRACER adds -pg, and this is
+# incompatible with -fomit-frame-pointer with current GCC, so we don't use
+# -fomit-frame-pointer with FUNCTION_TRACER.
+ifndef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -fomit-frame-pointer
endif
+endif
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
diff --git a/arch/Kconfig b/arch/Kconfig
index 4877a8c..fe48fc7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -32,8 +32,9 @@
config KPROBES
bool "Kprobes"
- depends on KALLSYMS && MODULES
+ depends on MODULES
depends on HAVE_KPROBES
+ select KALLSYMS
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
@@ -45,7 +46,6 @@
def_bool y
depends on KPROBES && HAVE_OPTPROBES
depends on !PREEMPT
- select KALLSYMS_ALL
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 01d71e1..012f124 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -43,6 +43,8 @@
/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
#ifndef CONFIG_SMP
+#include <linux/sched.h>
+
extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 804e531..058937b 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -449,10 +449,13 @@
#define __NR_pwritev 491
#define __NR_rt_tgsigqueueinfo 492
#define __NR_perf_event_open 493
+#define __NR_fanotify_init 494
+#define __NR_fanotify_mark 495
+#define __NR_prlimit64 496
#ifdef __KERNEL__
-#define NR_SYSCALLS 494
+#define NR_SYSCALLS 497
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
@@ -463,6 +466,7 @@
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
/* "Conditional" syscalls. What we want is
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index b45d913..6d159ce 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
ldq $20, HAE_REG($19); \
stq $21, HAE_CACHE($19); \
stq $21, 0($20); \
- ldq $0, 0($sp); \
- ldq $1, 8($sp); \
99:; \
ldq $19, 72($sp); \
ldq $20, 80($sp); \
@@ -316,19 +314,24 @@
cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
ldq $0, SP_OFF($sp)
and $0, 8, $0
- beq $0, restore_all
-ret_from_reschedule:
+ beq $0, ret_to_kernel
+ret_to_user:
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
lda $16, 7
call_pal PAL_swpipl
ldl $5, TI_FLAGS($8)
and $5, _TIF_WORK_MASK, $2
- bne $5, work_pending
+ bne $2, work_pending
restore_all:
RESTORE_ALL
call_pal PAL_rti
+ret_to_kernel:
+ lda $16, 7
+ call_pal PAL_swpipl
+ br restore_all
+
.align 3
$syscall_error:
/*
@@ -363,7 +366,7 @@
* $8: current.
* $19: The old syscall number, or zero if this is not a return
* from a syscall that errored and is possibly restartable.
- * $20: Error indication.
+ * $20: The old a3 value
*/
.align 4
@@ -392,12 +395,18 @@
$work_notifysig:
mov $sp, $16
- br $1, do_switch_stack
+ bsr $1, do_switch_stack
mov $sp, $17
mov $5, $18
+ mov $19, $9 /* save old syscall number */
+ mov $20, $10 /* save old a3 */
+ and $5, _TIF_SIGPENDING, $2
+ cmovne $2, 0, $9 /* we don't want double syscall restarts */
jsr $26, do_notify_resume
+ mov $9, $19
+ mov $10, $20
bsr $1, undo_switch_stack
- br restore_all
+ br ret_to_user
.end work_pending
/*
@@ -430,6 +439,7 @@
beq $1, 1f
ldq $27, 0($2)
1: jsr $26, ($27), sys_gettimeofday
+ret_from_straced:
ldgp $gp, 0($26)
/* check return.. */
@@ -650,7 +660,7 @@
/* We don't actually care for a3 success widgetry in the kernel.
Not for positive errno values. */
stq $0, 0($sp) /* $0 */
- br restore_all
+ br ret_to_kernel
.end kernel_thread
/*
@@ -757,11 +767,15 @@
.ent sys_sigreturn
sys_sigreturn:
.prologue 0
+ lda $9, ret_from_straced
+ cmpult $26, $9, $9
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_sigreturn
- br $1, undo_switch_stack
+ bne $9, 1f
+ jsr $26, syscall_trace
+1: br $1, undo_switch_stack
br ret_from_sys_call
.end sys_sigreturn
@@ -770,47 +784,19 @@
.ent sys_rt_sigreturn
sys_rt_sigreturn:
.prologue 0
+ lda $9, ret_from_straced
+ cmpult $26, $9, $9
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_rt_sigreturn
- br $1, undo_switch_stack
+ bne $9, 1f
+ jsr $26, syscall_trace
+1: br $1, undo_switch_stack
br ret_from_sys_call
.end sys_rt_sigreturn
.align 4
- .globl sys_sigsuspend
- .ent sys_sigsuspend
-sys_sigsuspend:
- .prologue 0
- mov $sp, $17
- br $1, do_switch_stack
- mov $sp, $18
- subq $sp, 16, $sp
- stq $26, 0($sp)
- jsr $26, do_sigsuspend
- ldq $26, 0($sp)
- lda $sp, SWITCH_STACK_SIZE+16($sp)
- ret
-.end sys_sigsuspend
-
- .align 4
- .globl sys_rt_sigsuspend
- .ent sys_rt_sigsuspend
-sys_rt_sigsuspend:
- .prologue 0
- mov $sp, $18
- br $1, do_switch_stack
- mov $sp, $19
- subq $sp, 16, $sp
- stq $26, 0($sp)
- jsr $26, do_rt_sigsuspend
- ldq $26, 0($sp)
- lda $sp, SWITCH_STACK_SIZE+16($sp)
- ret
-.end sys_rt_sigsuspend
-
- .align 4
.globl sys_sethae
.ent sys_sethae
sys_sethae:
@@ -929,15 +915,6 @@
.end sys_execve
.align 4
- .globl osf_sigprocmask
- .ent osf_sigprocmask
-osf_sigprocmask:
- .prologue 0
- mov $sp, $18
- jmp $31, sys_osf_sigprocmask
-.end osf_sigprocmask
-
- .align 4
.globl alpha_ni_syscall
.ent alpha_ni_syscall
alpha_ni_syscall:
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c
index 8ca6345..253cf1a 100644
--- a/arch/alpha/kernel/err_ev6.c
+++ b/arch/alpha/kernel/err_ev6.c
@@ -90,11 +90,13 @@
ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
u64 c_stat, u64 c_sts, int print)
{
- char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
- "MEMORY", "BCACHE", "DCACHE",
- "BCACHE PROBE", "BCACHE PROBE" };
- char *streamname[] = { "D", "I" };
- char *bitsname[] = { "SINGLE", "DOUBLE" };
+ static const char * const sourcename[] = {
+ "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "MEMORY", "BCACHE", "DCACHE",
+ "BCACHE PROBE", "BCACHE PROBE"
+ };
+ static const char * const streamname[] = { "D", "I" };
+ static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
int status = MCHK_DISPOSITION_REPORT;
int source = -1, stream = -1, bits = -1;
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
index 5c905aa..648ae88 100644
--- a/arch/alpha/kernel/err_marvel.c
+++ b/arch/alpha/kernel/err_marvel.c
@@ -589,22 +589,23 @@
static void
marvel_print_pox_trans_sum(u64 trans_sum)
{
- char *pcix_cmd[] = { "Interrupt Acknowledge",
- "Special Cycle",
- "I/O Read",
- "I/O Write",
- "Reserved",
- "Reserved / Device ID Message",
- "Memory Read",
- "Memory Write",
- "Reserved / Alias to Memory Read Block",
- "Reserved / Alias to Memory Write Block",
- "Configuration Read",
- "Configuration Write",
- "Memory Read Multiple / Split Completion",
- "Dual Address Cycle",
- "Memory Read Line / Memory Read Block",
- "Memory Write and Invalidate / Memory Write Block"
+ static const char * const pcix_cmd[] = {
+ "Interrupt Acknowledge",
+ "Special Cycle",
+ "I/O Read",
+ "I/O Write",
+ "Reserved",
+ "Reserved / Device ID Message",
+ "Memory Read",
+ "Memory Write",
+ "Reserved / Alias to Memory Read Block",
+ "Reserved / Alias to Memory Write Block",
+ "Configuration Read",
+ "Configuration Write",
+ "Memory Read Multiple / Split Completion",
+ "Dual Address Cycle",
+ "Memory Read Line / Memory Read Block",
+ "Memory Write and Invalidate / Memory Write Block"
};
#define IO7__POX_TRANSUM__PCI_ADDR__S (0)
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index f7ed97c..c3b3781 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -75,8 +75,12 @@
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
- char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
- char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
+ static const char * const serror_src[] = {
+ "GPCI", "APCI", "AGP HP", "AGP LP"
+ };
+ static const char * const serror_cmd[] = {
+ "DMA Read", "DMA RMW", "SGTE Read", "Reserved"
+ };
#endif /* CONFIG_VERBOSE_MCHECK */
#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
@@ -140,14 +144,15 @@
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
- char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
- "I/O Read", "I/O Write",
- "Reserved", "Reserved",
- "Memory Read", "Memory Write",
- "Reserved", "Reserved",
- "Configuration Read", "Configuration Write",
- "Memory Read Multiple", "Dual Address Cycle",
- "Memory Read Line","Memory Write and Invalidate"
+ static const char * const perror_cmd[] = {
+ "Interrupt Acknowledge", "Special Cycle",
+ "I/O Read", "I/O Write",
+ "Reserved", "Reserved",
+ "Memory Read", "Memory Write",
+ "Reserved", "Reserved",
+ "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address Cycle",
+ "Memory Read Line", "Memory Write and Invalidate"
};
#endif /* CONFIG_VERBOSE_MCHECK */
@@ -273,11 +278,11 @@
int cmd, len;
unsigned long addr;
- char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)",
- "Write (low-priority)",
- "Write (high-priority)",
- "Reserved", "Reserved",
- "Flush", "Fence"
+ static const char * const agperror_cmd[] = {
+ "Read (low-priority)", "Read (high-priority)",
+ "Write (low-priority)", "Write (high-priority)",
+ "Reserved", "Reserved",
+ "Flush", "Fence"
};
#endif /* CONFIG_VERBOSE_MCHECK */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 5d1e6d6..547e8b8 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
@@ -69,7 +68,6 @@
{
struct mm_struct *mm;
- lock_kernel();
mm = current->mm;
mm->end_code = bss_start + bss_len;
mm->start_brk = bss_start + bss_len;
@@ -78,7 +76,6 @@
printk("set_program_attributes(%lx %lx %lx %lx)\n",
text_start, text_len, bss_start, bss_len);
#endif
- unlock_kernel();
return 0;
}
@@ -517,7 +514,6 @@
long error;
int __user *min_buf_size_ptr;
- lock_kernel();
switch (code) {
case PL_SET:
if (get_user(error, &args->set.nbytes))
@@ -547,7 +543,6 @@
error = -EOPNOTSUPP;
break;
};
- unlock_kernel();
return error;
}
@@ -594,7 +589,7 @@
SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
{
- char *sysinfo_table[] = {
+ const char *sysinfo_table[] = {
utsname()->sysname,
utsname()->nodename,
utsname()->release,
@@ -606,7 +601,7 @@
"dummy", /* secure RPC domain */
};
unsigned long offset;
- char *res;
+ const char *res;
long len, err = -EINVAL;
offset = command-1;
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index 738fc82..b899e95 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -66,7 +66,7 @@
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
- struct resource *res = (struct resource *)attr->private;
+ struct resource *res = attr->private;
enum pci_mmap_state mmap_type;
struct pci_bus_region bar;
int i;
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba3..3ec3506 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@
dest[27] = pt->r27;
dest[28] = pt->r28;
dest[29] = pt->gp;
- dest[30] = rdusp();
+ dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
dest[31] = pt->pc;
/* Once upon a time this was the PS value. Which is stupid
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0932dbb..d290845 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@
/*
* The OSF/1 sigprocmask calling sequence is different from the
* C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
*/
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
- struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
{
- unsigned long oldmask = -EINVAL;
+ sigset_t oldmask;
+ sigset_t mask;
+ unsigned long res;
- if ((unsigned long)how-1 <= 2) {
- long sign = how-2; /* -1 .. 1 */
- unsigned long block, unblock;
-
- newmask &= _BLOCKABLE;
-		spin_lock_irq(&current->sighand->siglock);
- oldmask = current->blocked.sig[0];
-
- unblock = oldmask & ~newmask;
- block = oldmask | newmask;
- if (!sign)
- block = unblock;
- if (sign <= 0)
- newmask = block;
- if (_NSIG_WORDS > 1 && sign > 0)
-			sigemptyset(&current->blocked);
- current->blocked.sig[0] = newmask;
- recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-
- regs->r0 = 0; /* special no error return */
+ siginitset(&mask, newmask & ~_BLOCKABLE);
+ res = sigprocmask(how, &mask, &oldmask);
+ if (!res) {
+ force_successful_syscall_return();
+ res = oldmask.sig[0];
}
- return oldmask;
+ return res;
}
SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
new_ka.ka_restorer = NULL;
}
@@ -106,9 +80,9 @@
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@@ -144,8 +118,7 @@
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
-asmlinkage int
-do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
+SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
@@ -154,41 +127,6 @@
recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
- /* Indicate EINTR on return from any possible signal handler,
- which will not come back through here, but via sigreturn. */
- regs->r0 = EINTR;
- regs->r19 = 1;
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
- struct pt_regs *regs, struct switch_stack *sw)
-{
- sigset_t set;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
- if (copy_from_user(&set, uset, sizeof(set)))
- return -EFAULT;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = set;
- recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
- /* Indicate EINTR on return from any possible signal handler,
- which will not come back through here, but via sigreturn. */
- regs->r0 = EINTR;
- regs->r19 = 1;
-
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
@@ -239,6 +177,8 @@
unsigned long usp;
long i, err = __get_user(regs->pc, &sc->sc_pc);
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
sw->r26 = (unsigned long) ret_from_sys_call;
err |= __get_user(regs->r0, sc->sc_regs+0);
@@ -591,7 +531,6 @@
regs->pc -= 4;
break;
case ERESTART_RESTARTBLOCK:
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
regs->r0 = EINTR;
break;
}
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index 4afc1a1..f0df3fb 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -87,7 +87,7 @@
srm_env_t *entry;
char *page;
- entry = (srm_env_t *)m->private;
+ entry = m->private;
page = (char *)__get_free_page(GFP_USER);
if (!page)
return -ENOMEM;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 09acb78..a6a1de9 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@
.quad sys_open /* 45 */
.quad alpha_ni_syscall
.quad sys_getxgid
- .quad osf_sigprocmask
+ .quad sys_osf_sigprocmask
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 50 */
.quad sys_acct
@@ -512,6 +512,9 @@
.quad sys_pwritev
.quad sys_rt_tgsigqueueinfo
.quad sys_perf_event_open
+ .quad sys_fanotify_init
+ .quad sys_fanotify_mark /* 495 */
+ .quad sys_prlimit64
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index eacceb2..396af17 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -191,16 +191,16 @@
write_sequnlock(&xtime_lock);
-#ifndef CONFIG_SMP
- while (nticks--)
- update_process_times(user_mode(get_irq_regs()));
-#endif
-
if (test_perf_event_pending()) {
clear_perf_event_pending();
perf_event_do_pending();
}
+#ifndef CONFIG_SMP
+ while (nticks--)
+ update_process_times(user_mode(get_irq_regs()));
+#endif
+
return IRQ_HANDLED;
}
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index b14f015..0414e02 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
@@ -623,7 +622,6 @@
return;
}
- lock_kernel();
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
pc, va, opcode, reg);
do_exit(SIGSEGV);
@@ -646,7 +644,6 @@
* Yikes! No one to forward the exception to.
* Since the registers are in a weird format, dump them ourselves.
*/
- lock_kernel();
printk("%s(%d): unhandled unaligned exception\n",
current->comm, task_pid_nr(current));
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0fdd099..b404e5e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -19,6 +19,8 @@
select HAVE_KPROBES if (!XIP_KERNEL)
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
+ select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
+ select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
@@ -146,6 +148,9 @@
and that the relevant menu configurations are displayed for
it.
+config ARCH_HAS_CPU_IDLE_WAIT
+ def_bool y
+
config GENERIC_HWEIGHT
bool
default y
@@ -272,7 +277,6 @@
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
- select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -1004,7 +1008,7 @@
config ARM_ERRATA_411920
bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
- depends on CPU_V6 && !SMP
+ depends on CPU_V6
help
Invalidation of the Instruction Cache operation can
fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
@@ -1052,6 +1056,32 @@
ACTLR register. Note that setting specific bits in the ACTLR register
may not be available in non-secure mode.
+config ARM_ERRATA_742230
+ bool "ARM errata: DMB operation may be faulty"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 742230 Cortex-A9
+ (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+ between two write operations may not ensure the correct visibility
+ ordering of the two writes. This workaround sets a specific bit in
+ the diagnostic register of the Cortex-A9 which causes the DMB
+ instruction to behave as a DSB, ensuring the correct behaviour of
+ the two writes.
+
+config ARM_ERRATA_742231
+ bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 742231 Cortex-A9
+ (r2p0..r2p2) erratum. Under certain conditions, specific to the
+ Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
+ accessing some data located in the same cache line, may get corrupted
+ data due to bad handling of the address hazard when the line gets
+ replaced from one of the CPUs at the same time as another CPU is
+ accessing it. This workaround sets specific bits in the diagnostic
+ register of the Cortex-A9 which reduces the linefill issuing
+ capabilities of the processor.
+
config PL310_ERRATA_588369
bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0 && ARCH_OMAP4
@@ -1143,13 +1173,13 @@
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
- depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP ||\
- MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
- ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4)
+ depends on EXPERIMENTAL
depends on GENERIC_CLOCKEVENTS
+ depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
+ MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
+ ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
select USE_GENERIC_SMP_HELPERS
- select HAVE_ARM_SCU if ARCH_REALVIEW || ARCH_OMAP4 || ARCH_S5PV310 ||\
- ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
+ select HAVE_ARM_SCU
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
@@ -1167,6 +1197,19 @@
If you don't know what to do here, say N.
+config SMP_ON_UP
+ bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on SMP && !XIP && !THUMB2_KERNEL
+ default y
+ help
+ SMP kernels contain instructions which fail on non-SMP processors.
+ Enabling this option allows the kernel to modify itself to make
+ these instructions safe. Disabling it allows about 1K of space
+ savings.
+
+ If you don't know what to do here, say Y.
+
config HAVE_ARM_SCU
bool
depends on SMP
@@ -1217,12 +1260,9 @@
config LOCAL_TIMERS
bool "Use local timer interrupts"
- depends on SMP && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || \
- REALVIEW_EB_A9MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
- ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4)
+ depends on SMP
default y
- select HAVE_ARM_TWD if ARCH_REALVIEW || ARCH_OMAP4 || ARCH_S5PV310 || \
- ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS
+ select HAVE_ARM_TWD
help
Enable support for local timers on SMP platforms, rather then the
legacy IPI broadcast method. Local timers allows the system
@@ -1577,97 +1617,6 @@
0xf8000000. This assumes the zImage being placed in the first 128MB
from start of memory.
-config ZRELADDR
- hex "Physical address of the decompressed kernel image"
- depends on !AUTO_ZRELADDR
- default 0x00008000 if ARCH_BCMRING ||\
- ARCH_CNS3XXX ||\
- ARCH_DOVE ||\
- ARCH_EBSA110 ||\
- ARCH_FOOTBRIDGE ||\
- ARCH_INTEGRATOR ||\
- ARCH_IOP13XX ||\
- ARCH_IOP33X ||\
- ARCH_IXP2000 ||\
- ARCH_IXP23XX ||\
- ARCH_IXP4XX ||\
- ARCH_KIRKWOOD ||\
- ARCH_KS8695 ||\
- ARCH_LOKI ||\
- ARCH_MMP ||\
- ARCH_MV78XX0 ||\
- ARCH_NOMADIK ||\
- ARCH_NUC93X ||\
- ARCH_NS9XXX ||\
- ARCH_ORION5X ||\
- ARCH_SPEAR3XX ||\
- ARCH_SPEAR6XX ||\
- ARCH_TEGRA ||\
- ARCH_U8500 ||\
- ARCH_VERSATILE ||\
- ARCH_W90X900
- default 0x08008000 if ARCH_MX1 ||\
- ARCH_SHARK
- default 0x10008000 if ARCH_MSM ||\
- ARCH_OMAP1 ||\
- ARCH_RPC
- default 0x20008000 if ARCH_S5P6440 ||\
- ARCH_S5P6442 ||\
- ARCH_S5PC100 ||\
- ARCH_S5PV210
- default 0x30008000 if ARCH_S3C2410 ||\
- ARCH_S3C2400 ||\
- ARCH_S3C2412 ||\
- ARCH_S3C2416 ||\
- ARCH_S3C2440 ||\
- ARCH_S3C2443
- default 0x40008000 if ARCH_STMP378X ||\
- ARCH_STMP37XX ||\
- ARCH_SH7372 ||\
- ARCH_SH7377 ||\
- ARCH_S5PV310
- default 0x50008000 if ARCH_S3C64XX ||\
- ARCH_SH7367
- default 0x60008000 if ARCH_VEXPRESS
- default 0x80008000 if ARCH_MX25 ||\
- ARCH_MX3 ||\
- ARCH_NETX ||\
- ARCH_OMAP2PLUS ||\
- ARCH_PNX4008
- default 0x90008000 if ARCH_MX5 ||\
- ARCH_MX91231
- default 0xa0008000 if ARCH_IOP32X ||\
- ARCH_PXA ||\
- MACH_MX27
- default 0xc0008000 if ARCH_LH7A40X ||\
- MACH_MX21
- default 0xf0008000 if ARCH_AAEC2000 ||\
- ARCH_L7200
- default 0xc0028000 if ARCH_CLPS711X
- default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
- default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
- default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
- default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
- default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
- default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
- default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
- default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
- default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
- default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
- default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
- default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
- default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
- default 0xc0208000 if ARCH_SA1100 && SA1111
- default 0xc0008000 if ARCH_SA1100 && !SA1111
- default 0x30108000 if ARCH_S3C2410 && PM_H1940
- default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
- default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
- help
- ZRELADDR is the physical address where the decompressed kernel
- image will be placed. ZRELADDR has to be specified when the
- assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
- selected.
-
endmenu
menu "CPU Power Management"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 91344af..4dbce53 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -27,6 +27,11 @@
the performance is not affected. Currently, this feature
only works with EABI compilers. If unsure say Y.
+config OLD_MCOUNT
+ bool
+ depends on FUNCTION_TRACER && FRAME_POINTER
+ default y
+
config DEBUG_USER
bool "Verbose user fault messages"
help
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index f705213..4a590f4 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -14,16 +14,18 @@
MKIMAGE := $(srctree)/scripts/mkuboot.sh
ifneq ($(MACHINE),)
--include $(srctree)/$(MACHINE)/Makefile.boot
+include $(srctree)/$(MACHINE)/Makefile.boot
endif
# Note: the following conditions must always be true:
+# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
# PARAMS_PHYS must be within 4MB of ZRELADDR
# INITRD_PHYS must be in RAM
+ZRELADDR := $(zreladdr-y)
PARAMS_PHYS := $(params_phys-y)
INITRD_PHYS := $(initrd_phys-y)
-export INITRD_PHYS PARAMS_PHYS
+export ZRELADDR INITRD_PHYS PARAMS_PHYS
targets := Image zImage xipImage bootpImage uImage
@@ -65,7 +67,7 @@
ifeq ($(CONFIG_ZBOOT_ROM),y)
$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
else
-$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
+$(obj)/uImage: LOADADDR=$(ZRELADDR)
endif
ifeq ($(CONFIG_THUMB2_KERNEL),y)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 68775e3..65a7c1c 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -79,6 +79,10 @@
EXTRA_CFLAGS := -fpic -fno-builtin
EXTRA_AFLAGS := -Wa,-march=all
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+endif
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
@@ -112,5 +116,5 @@
$(obj)/font.c: $(FONTC)
$(call cmd,shipped)
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
@sed "$(SEDFLAGS)" < $< > $@
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6af9907..6825c34 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -177,7 +177,7 @@
and r4, pc, #0xf8000000
add r4, r4, #TEXT_OFFSET
#else
- ldr r4, =CONFIG_ZRELADDR
+ ldr r4, =zreladdr
#endif
subs r0, r0, r1 @ calculate the delta offset
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 6c09135..1bec96e 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -263,6 +263,22 @@
return 0;
}
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+ dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+ __func__, dma_addr, size);
+ return (dev->bus == &pci_bus_type) &&
+ ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
+}
+
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (mask >= PHYS_OFFSET + SZ_64M - 1)
+ return 0;
+
+ return -EIO;
+}
+
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
{
it8152_io.start = IT8152_IO_BASE + 0x12000;
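The new dma_needs_bounce() in the hunk above flags any PCI-originated buffer that reaches past the first 64MB above the start of RAM. As a quick standalone check of that arithmetic (a sketch only, with the bus-type test dropped; the PHYS_OFFSET value is purely illustrative, not taken from any particular board):

/* Standalone sketch of the IT8152 bounce condition.  PHYS_OFFSET below is
 * an assumed example value, not a statement about real hardware. */
#include <stdint.h>
#include <stdio.h>

#define PHYS_OFFSET 0xa0000000u	/* assumed base of RAM for this example */
#define SZ_64M      0x04000000u

static int needs_bounce(uint32_t dma_addr, uint32_t size)
{
	/* same window test as dma_needs_bounce() above, minus the PCI check */
	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
}

int main(void)
{
	/* buffer well inside the first 64MB: directly reachable, no bounce */
	printf("%d\n", needs_bounce(PHYS_OFFSET + 0x1000, 0x1000));	/* 0 */
	/* buffer starting beyond the 64MB window: must be bounced */
	printf("%d\n", needs_bounce(PHYS_OFFSET + SZ_64M, 0x1000));	/* 1 */
	return 0;
}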
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 5ebbab6..8f0f86d 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -146,8 +146,7 @@
#define DESIGNER 0x41
#define REVISION 0x0
#define INTEG_CFG 0x0
-#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12) \
- | (REVISION << 20) | (INTEG_CFG << 24))
+#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
#define PCELL_ID_VAL 0xb105f00d
@@ -1859,10 +1858,10 @@
regs = pi->base;
/* Check if we can handle this DMAC */
- if (get_id(pi, PERIPH_ID) != PERIPH_ID_VAL
+ if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
|| get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
- readl(regs + PERIPH_ID), readl(regs + PCELL_ID));
+ get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
return -EINVAL;
}
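The pl330 probe change above compares only the low 20 bits of PERIPH_ID, i.e. the part-number and designer fields, so a different silicon revision or integration configuration no longer fails the check. A worked example of the masking (standalone sketch; the sample register value is invented):

/* Sketch of the relaxed PL330 PERIPH_ID check.  The register value below is
 * made up for illustration (part 0x330, designer 0x41, revision 2). */
#include <stdio.h>

#define PART          0x330
#define DESIGNER      0x41
#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))	/* 0x41330 */

int main(void)
{
	unsigned int id = 0x00241330;	/* hypothetical readout with revision = 2 */

	/* old check: full compare including revision bits -> spurious mismatch */
	printf("full compare:   %s\n", id == PERIPH_ID_VAL ? "match" : "mismatch");
	/* new check: mask revision/integration bits away -> match */
	printf("masked compare: %s\n",
	       (id & 0xfffff) == PERIPH_ID_VAL ? "match" : "mismatch");
	return 0;
}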
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 517d50d..c0258a8 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -678,7 +678,7 @@
* %-EBUSY physical address already marked in-use.
* %0 successful.
*/
-static int
+static int __devinit
__sa1111_probe(struct device *me, struct resource *mem, int irq)
{
struct sa1111 *sachip;
diff --git a/arch/arm/configs/realview-smp_defconfig b/arch/arm/configs/realview-smp_defconfig
index 9312ef9..5ca7a61 100644
--- a/arch/arm/configs/realview-smp_defconfig
+++ b/arch/arm/configs/realview-smp_defconfig
@@ -39,6 +39,7 @@
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ARM_INTEGRATOR=y
+CONFIG_ARM_CHARLCD=y
CONFIG_NETDEVICES=y
CONFIG_SMSC_PHY=y
CONFIG_NET_ETHERNET=y
@@ -52,10 +53,13 @@
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_VERSATILE=y
+CONFIG_SPI=y
+CONFIG_GPIOLIB=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -70,7 +74,13 @@
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
-CONFIG_INOTIFY=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_PL031=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
@@ -80,6 +90,7 @@
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig
index fb75192..fcaa603 100644
--- a/arch/arm/configs/realview_defconfig
+++ b/arch/arm/configs/realview_defconfig
@@ -38,6 +38,7 @@
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ARM_INTEGRATOR=y
+CONFIG_ARM_CHARLCD=y
CONFIG_NETDEVICES=y
CONFIG_SMSC_PHY=y
CONFIG_NET_ETHERNET=y
@@ -51,10 +52,13 @@
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_VERSATILE=y
+CONFIG_SPI=y
+CONFIG_GPIOLIB=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -69,7 +73,13 @@
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
-CONFIG_INOTIFY=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_PL031=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
@@ -79,6 +89,7 @@
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 46e5e07..c1c252c 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -28,26 +28,9 @@
CONFIG_FPE_NWFPE=y
CONFIG_PM=y
# CONFIG_SUSPEND is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC_SMC=y
+# CONFIG_MISC_DEVICES is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -58,7 +41,6 @@
CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
-CONFIG_POWER_SUPPLY=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_REGULATOR=y
@@ -66,24 +48,10 @@
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_VERBOSE_PROCFS is not set
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-CONFIG_SND_SOC=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
CONFIG_MMC_ARMMMCI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_RTC_DRV_COH901331=y
@@ -93,12 +61,11 @@
CONFIG_FUSE_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
-# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
# CONFIG_SCHED_DEBUG is not set
CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6e8f05c..062b58c 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -154,16 +154,39 @@
.long 9999b,9001f; \
.popsection
+#ifdef CONFIG_SMP
+#define ALT_SMP(instr...) \
+9998: instr
+#define ALT_UP(instr...) \
+ .pushsection ".alt.smp.init", "a" ;\
+ .long 9998b ;\
+ instr ;\
+ .popsection
+#define ALT_UP_B(label) \
+ .equ up_b_offset, label - 9998b ;\
+ .pushsection ".alt.smp.init", "a" ;\
+ .long 9998b ;\
+ b . + up_b_offset ;\
+ .popsection
+#else
+#define ALT_SMP(instr...)
+#define ALT_UP(instr...) instr
+#define ALT_UP_B(label) b label
+#endif
+
/*
* SMP data memory barrier
*/
.macro smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
- dmb
+ ALT_SMP(dmb)
#elif __LINUX_ARM_ARCH__ == 6
- mcr p15, 0, r0, c7, c10, 5 @ dmb
+ ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
+#else
+#error Incompatible SMP platform
#endif
+ ALT_UP(nop)
#endif
.endm
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4656a24..3acd8fa 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -137,10 +137,10 @@
#endif
/*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
*/
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
/*
* MM Cache Management
@@ -156,6 +156,12 @@
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ * Currently only needed for cache-v6.S and cache-v7.S, see
+ * __flush_icache_all for the generic implementation.
+ *
* flush_kern_all()
*
* Unconditionally clean and invalidate the entire cache.
@@ -206,6 +212,7 @@
*/
struct cpu_cache_fns {
+ void (*flush_icache_all)(void);
void (*flush_kern_all)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -227,6 +234,7 @@
extern struct cpu_cache_fns cpu_cache;
+#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
@@ -246,6 +254,7 @@
#else
+#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
@@ -253,6 +262,7 @@
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
+extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
@@ -291,6 +301,37 @@
/*
* Convert calls to our calling convention.
*/
+
+/* Invalidate I-cache */
+#define __flush_icache_all_generic() \
+ asm("mcr p15, 0, %0, c7, c5, 0" \
+ : : "r" (0));
+
+/* Invalidate I-cache inner shareable */
+#define __flush_icache_all_v7_smp() \
+ asm("mcr p15, 0, %0, c7, c1, 0" \
+ : : "r" (0));
+
+/*
+ * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
+ * will fall through to use __flush_icache_all_generic.
+ */
+#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
+ defined(CONFIG_SMP_ON_UP)
+#define __flush_icache_preferred __cpuc_flush_icache_all
+#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+#define __flush_icache_preferred __flush_icache_all_v7_smp
+#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
+#define __flush_icache_preferred __cpuc_flush_icache_all
+#else
+#define __flush_icache_preferred __flush_icache_all_generic
+#endif
+
+static inline void __flush_icache_all(void)
+{
+ __flush_icache_preferred();
+}
+
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
@@ -366,21 +407,6 @@
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-static inline void __flush_icache_all(void)
-{
-#ifdef CONFIG_ARM_ERRATA_411920
- extern void v6_icache_inval_all(void);
- v6_icache_inval_all();
-#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
- asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n"
- :
- : "r" (0));
-#else
- asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
- :
- : "r" (0));
-#endif
-}
static inline void flush_kernel_vmap_range(void *addr, int size)
{
if ((cache_is_vivt() || cache_is_vipt_aliasing()))
@@ -405,9 +431,6 @@
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
- /* highmem pages are always flushed upon kunmap already */
- if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
- __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
#define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index d3a4c2c..c023db0 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -6,6 +6,7 @@
#define CACHEID_VIPT_ALIASING (1 << 2)
#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
#define CACHEID_ASID_TAGGED (1 << 3)
+#define CACHEID_VIPT_I_ALIASING (1 << 4)
extern unsigned int cacheid;
@@ -14,15 +15,18 @@
#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING)
#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
+#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
/*
* __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
* Mask out support which will never be present on newer CPUs.
* - v6+ is never VIVT
- * - v7+ VIPT never aliases
+ * - v7+ VIPT never aliases on D-side
*/
#if __LINUX_ARM_ARCH__ >= 7
-#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\
+ CACHEID_ASID_TAGGED |\
+ CACHEID_VIPT_I_ALIASING)
#elif __LINUX_ARM_ARCH__ >= 6
#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
#else
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c226fe1..c568da7 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -288,15 +288,7 @@
* DMA access and 1 if the buffer needs to be bounced.
*
*/
-#ifdef CONFIG_SA1111
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
- size_t size)
-{
- return 0;
-}
-#endif
/*
* The DMA API, implemented by dmabounce.c. See below for descriptions.
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 103f7ee..f89515a 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -2,12 +2,30 @@
#define _ASM_ARM_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((long)(mcount))
+#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
extern void mcount(void);
extern void __gnu_mcount_nc(void);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+struct dyn_arch_ftrace {
+#ifdef CONFIG_OLD_MCOUNT
+ bool old_mcount;
+#endif
+};
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /* With Thumb-2, the recorded addresses have the lsb set */
+ return addr & ~1;
+}
+
+extern void ftrace_caller_old(void);
+extern void ftrace_call_old(void);
+#endif
+
#endif
#endif
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 212e478..7ecd793 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -21,18 +21,6 @@
#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
-struct tracectx {
- unsigned int etb_bufsz;
- void __iomem *etb_regs;
- void __iomem *etm_regs;
- unsigned long flags;
- int ncmppairs;
- int etm_portsz;
- struct device *dev;
- struct clk *emu_clk;
- struct mutex mutex;
-};
-
#define TRACER_TIMEOUT 10000
#define etm_writel(t, v, x) \
@@ -112,10 +100,10 @@
/* ETM status register, "ETM Architecture", 3.3.2 */
#define ETMR_STATUS (0x10)
-#define ETMST_OVERFLOW (1 << 0)
-#define ETMST_PROGBIT (1 << 1)
-#define ETMST_STARTSTOP (1 << 2)
-#define ETMST_TRIGGER (1 << 3)
+#define ETMST_OVERFLOW BIT(0)
+#define ETMST_PROGBIT BIT(1)
+#define ETMST_STARTSTOP BIT(2)
+#define ETMST_TRIGGER BIT(3)
#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
@@ -123,7 +111,7 @@
#define ETMR_TRACEENCTRL2 0x1c
#define ETMR_TRACEENCTRL 0x24
-#define ETMTE_INCLEXCL (1 << 24)
+#define ETMTE_INCLEXCL BIT(24)
#define ETMR_TRACEENEVT 0x20
#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
ETMCTRL_DATA_DO_ADDR | \
@@ -146,12 +134,12 @@
#define ETBR_CTRL 0x20
#define ETBR_FORMATTERCTRL 0x304
#define ETBFF_ENFTC 1
-#define ETBFF_ENFCONT (1 << 1)
-#define ETBFF_FONFLIN (1 << 4)
-#define ETBFF_MANUAL_FLUSH (1 << 6)
-#define ETBFF_TRIGIN (1 << 8)
-#define ETBFF_TRIGEVT (1 << 9)
-#define ETBFF_TRIGFL (1 << 10)
+#define ETBFF_ENFCONT BIT(1)
+#define ETBFF_FONFLIN BIT(4)
+#define ETBFF_MANUAL_FLUSH BIT(6)
+#define ETBFF_TRIGIN BIT(8)
+#define ETBFF_TRIGEVT BIT(9)
+#define ETBFF_TRIGFL BIT(10)
#define etb_writel(t, v, x) \
(__raw_writel((v), (t)->etb_regs + (x)))
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index e4dfa69..cbb0bc2 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -7,20 +7,27 @@
struct unwind_table;
-struct mod_arch_specific
-{
#ifdef CONFIG_ARM_UNWIND
- Elf_Shdr *unw_sec_init;
- Elf_Shdr *unw_sec_devinit;
- Elf_Shdr *unw_sec_core;
- Elf_Shdr *sec_init_text;
- Elf_Shdr *sec_devinit_text;
- Elf_Shdr *sec_core_text;
- struct unwind_table *unwind_init;
- struct unwind_table *unwind_devinit;
- struct unwind_table *unwind_core;
-#endif
+struct arm_unwind_mapping {
+ Elf_Shdr *unw_sec;
+ Elf_Shdr *sec_text;
+ struct unwind_table *unwind;
};
+enum {
+ ARM_SEC_INIT,
+ ARM_SEC_DEVINIT,
+ ARM_SEC_CORE,
+ ARM_SEC_EXIT,
+ ARM_SEC_DEVEXIT,
+ ARM_SEC_MAX,
+};
+struct mod_arch_specific {
+ struct arm_unwind_mapping map[ARM_SEC_MAX];
+};
+#else
+struct mod_arch_specific {
+};
+#endif
/*
* Include the ARM architecture version.
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 48837e6..b5799a3 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -17,7 +17,7 @@
* counter interrupts are regular interrupts and not an NMI. This
* means that when we receive the interrupt we can call
* perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
+ * interrupts disabled.
*/
static inline void
set_perf_event_pending(void)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1..a9672e8 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -278,9 +278,24 @@
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
-#define set_pte_at(mm,addr,ptep,pteval) do { \
- set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
- } while (0)
+#if __LINUX_ARM_ARCH__ < 6
+static inline void __sync_icache_dcache(pte_t pteval)
+{
+}
+#else
+extern void __sync_icache_dcache(pte_t pteval);
+#endif
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ if (addr >= TASK_SIZE)
+ set_pte_ext(ptep, pteval, 0);
+ else {
+ __sync_icache_dcache(pteval);
+ set_pte_ext(ptep, pteval, PTE_EXT_NG);
+ }
+}
/*
* The following only work if pte_present() is true.
@@ -290,8 +305,13 @@
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)
#define pte_special(pte) (0)
+#define pte_present_user(pte) \
+ ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+ (L_PTE_PRESENT | L_PTE_USER))
+
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -317,6 +337,10 @@
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
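On ARMv6 and later the new set_pte_at() above pulls the I/D coherency work into the PTE write: user mappings pass through __sync_icache_dcache() before the hardware PTE is set, and PG_dcache_clean (see the cacheflush.h hunk earlier) records that a page no longer needs cleaning. The toy model below illustrates only that bookkeeping; it is not the real __sync_icache_dcache() from arch/arm/mm/flush.c, which is outside this hunk:

/* Toy model of the deferred D/I-cache sync triggered from set_pte_at().
 * Illustration of the PG_dcache_clean idea only; not the kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct page { bool dcache_clean; };	/* stands in for PG_dcache_clean */

static void flush_dcache_and_icache(struct page *pg)
{
	printf("clean D-cache and invalidate I-cache for this page\n");
	pg->dcache_clean = true;	/* remember the page is now coherent */
}

/* called when a user, executable mapping of the page is installed */
static void sync_icache_dcache(struct page *pg, bool user, bool exec)
{
	if (user && exec && !pg->dcache_clean)
		flush_dcache_and_icache(pg);
	/* otherwise nothing to do: not executable, or already clean */
}

int main(void)
{
	struct page pg = { .dcache_clean = false };

	sync_icache_dcache(&pg, true, true);	/* first exec mapping: flush */
	sync_icache_dcache(&pg, true, true);	/* later mappings: no work   */
	return 0;
}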
diff --git a/arch/arm/include/asm/smp_mpidr.h b/arch/arm/include/asm/smp_mpidr.h
new file mode 100644
index 0000000..6a9307d
--- /dev/null
+++ b/arch/arm/include/asm/smp_mpidr.h
@@ -0,0 +1,17 @@
+#ifndef ASMARM_SMP_MIDR_H
+#define ASMARM_SMP_MIDR_H
+
+#define hard_smp_processor_id() \
+ ({ \
+ unsigned int cpunum; \
+ __asm__("\n" \
+ "1: mrc p15, 0, %0, c0, c0, 5\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n"\
+ " .long 1b\n" \
+ " mov %0, #0\n" \
+ " .popsection" \
+ : "=r" (cpunum)); \
+ cpunum &= 0x0F; \
+ })
+
+#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index e621530..f24c1b9 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -7,15 +7,40 @@
#include <asm/cputype.h>
+/*
+ * Return true if we are running on a SMP platform
+ */
+static inline bool is_smp(void)
+{
+#ifndef CONFIG_SMP
+ return false;
+#elif defined(CONFIG_SMP_ON_UP)
+ extern unsigned int smp_on_up;
+ return !!smp_on_up;
+#else
+ return true;
+#endif
+}
+
/* all SMP configurations have the extended CPUID registers */
static inline int tlb_ops_need_broadcast(void)
{
+ if (!is_smp())
+ return 0;
+
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}
+#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
+#define cache_ops_need_broadcast() 0
+#else
static inline int cache_ops_need_broadcast(void)
{
+ if (!is_smp())
+ return 0;
+
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
}
+#endif
#endif
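Both broadcast helpers above key off the same 4-bit field of ID_MMFR3 (bits 15:12, the maintenance-broadcast field), and with SMP_ON_UP they now short-circuit to 0 when is_smp() reports a uniprocessor at runtime. A standalone decode of that field, mirroring the thresholds used in the code above (the register value is invented; the interpretation of the thresholds is as I read the checks, not quoted from the ARM ARM):

/* Standalone decode of ID_MMFR3[15:12] with the same thresholds as
 * tlb_ops_need_broadcast()/cache_ops_need_broadcast().  The register value
 * is fabricated for the example. */
#include <stdio.h>

int main(void)
{
	unsigned int mmfr3 = 0x00102211;		/* hypothetical ID_MMFR3 */
	unsigned int bcast = (mmfr3 >> 12) & 0xf;	/* maintenance broadcast */

	printf("broadcast field            = %u\n", bcast);
	printf("TLB ops need sw broadcast  = %d\n", bcast < 2);
	printf("cache ops need sw broadcast= %d\n", bcast < 1);
	return 0;
}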
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index b516cde..1120f18 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -329,6 +329,8 @@
extern void disable_hlt(void);
extern void enable_hlt(void);
+void cpu_idle_wait(void);
+
#include <asm-generic/cmpxchg-local.h>
#if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 33b546a..ce7378ea 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -70,6 +70,10 @@
#undef _TLB
#undef MULTI_TLB
+#ifdef CONFIG_SMP_ON_UP
+#define MULTI_TLB 1
+#endif
+
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#ifdef CONFIG_CPU_TLB_V3
@@ -185,17 +189,23 @@
# define v6wbi_always_flags (-1UL)
#endif
-#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#else
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \
TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
-#endif
#ifdef CONFIG_CPU_TLB_V7
-# define v7wbi_possible_flags v7wbi_tlb_flags
-# define v7wbi_always_flags v7wbi_tlb_flags
+
+# ifdef CONFIG_SMP_ON_UP
+# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
+# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
+# elif defined(CONFIG_SMP)
+# define v7wbi_possible_flags v7wbi_tlb_flags_smp
+# define v7wbi_always_flags v7wbi_tlb_flags_smp
+# else
+# define v7wbi_possible_flags v7wbi_tlb_flags_up
+# define v7wbi_always_flags v7wbi_tlb_flags_up
+# endif
# ifdef _TLB
# define MULTI_TLB 1
# else
@@ -560,12 +570,20 @@
#endif
/*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * If PG_dcache_clean is not set for the page, we need to ensure that any
* cache entries for the kernel's virtual memory range are written
- * back to the page.
+ * back to the page. On ARMv6 and later, the cache coherency is handled via
+ * the set_pte_at() function.
*/
+#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+#endif
#endif
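With SMP_ON_UP the v7 TLB flags can no longer be fixed at build time, so the header now derives two masks: possible_flags is the OR of the SMP and UP variants (anything that might be needed at runtime) and always_flags is the AND (anything needed in every case). The sketch below shows why that split lets the compiler drop dead tests; it is a generic illustration of the pattern, not the actual tlb_flag() machinery in this header:

/* Generic possible/always flag pattern, as used for the v7wbi TLB flags.
 * FLAG_A is common to both variants, FLAG_B only exists in the SMP one. */
#include <stdio.h>

#define FLAG_A (1u << 0)
#define FLAG_B (1u << 1)

#define FLAGS_SMP (FLAG_A | FLAG_B)
#define FLAGS_UP  (FLAG_A)

#define POSSIBLE_FLAGS (FLAGS_SMP | FLAGS_UP)	/* may be set at runtime */
#define ALWAYS_FLAGS   (FLAGS_SMP & FLAGS_UP)	/* set in every variant  */

/* constant-folds to 1 when f is in ALWAYS_FLAGS, to 0 when f is not even
 * possible; only the remaining cases need the runtime word */
#define flag_set(runtime, f) \
	((((ALWAYS_FLAGS) & (f)) != 0) || \
	 ((((POSSIBLE_FLAGS) & (f)) != 0) && (((runtime) & (f)) != 0)))

int main(void)
{
	unsigned int runtime = FLAGS_UP;	/* pretend we booted the UP variant */

	printf("FLAG_A applies: %d\n", flag_set(runtime, FLAG_A)); /* 1, no runtime test */
	printf("FLAG_B applies: %d\n", flag_set(runtime, FLAG_B)); /* 0 on this boot      */
	return 0;
}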
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index d02cfb6..c891eb7 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -393,6 +393,9 @@
#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
#define __NR_accept4 (__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 8214bfe..e5e1e53 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -165,6 +165,8 @@
#endif
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_OLD_MCOUNT
EXPORT_SYMBOL(mcount);
+#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index afeb71f..5c26ecc 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -376,6 +376,9 @@
CALL(sys_perf_event_open)
/* 365 */ CALL(sys_recvmmsg)
CALL(sys_accept4)
+ CALL(sys_fanotify_init)
+ CALL(sys_fanotify_mark)
+ CALL(sys_prlimit64)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb8e93a..c09e357 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -46,7 +46,8 @@
* this macro assumes that irqstat (r6) and base (r5) are
* preserved from get_irqnr_and_base above
*/
- test_for_ipi r0, r6, r5, lr
+ ALT_SMP(test_for_ipi r0, r6, r5, lr)
+ ALT_UP_B(9997f)
movne r0, sp
adrne lr, BSYM(1b)
bne do_IPI
@@ -57,6 +58,7 @@
adrne lr, BSYM(1b)
bne do_local_timer
#endif
+9997:
#endif
.endm
@@ -965,11 +967,8 @@
beq 1b
rsbs r0, r3, #0
/* beware -- each __kuser slot must be 8 instructions max */
-#ifdef CONFIG_SMP
- b __kuser_memory_barrier
-#else
- usr_ret lr
-#endif
+ ALT_SMP(b __kuser_memory_barrier)
+ ALT_UP(usr_ret lr)
#endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a..2d23ad9 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
+ tst r1, #_TIF_SIGPENDING @ delivering a signal?
+ movne why, #0 @ prevent further restarts
bl do_notify_resume
b ret_slow_syscall @ Check work again
@@ -127,30 +129,58 @@
* clobber the ip register. This is OK because the ARM calling convention
* allows it to be clobbered in subroutines and doesn't use it to hold
* parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
*/
+
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
+ENTRY(__gnu_mcount_nc)
+ mov ip, lr
+ ldmia sp!, {lr}
+ mov pc, ip
+ENDPROC(__gnu_mcount_nc)
+
+ENTRY(ftrace_caller)
stmdb sp!, {r0-r3, lr}
mov r0, lr
sub r0, r0, #MCOUNT_INSN_SIZE
+ ldr r1, [sp, #20]
- .globl mcount_call
-mcount_call:
+ .global ftrace_call
+ftrace_call:
bl ftrace_stub
- ldr lr, [fp, #-4] @ restore lr
- ldmia sp!, {r0-r3, pc}
+ ldmia sp!, {r0-r3, ip, lr}
+ mov pc, ip
+ENDPROC(ftrace_caller)
-ENTRY(ftrace_caller)
+#ifdef CONFIG_OLD_MCOUNT
+ENTRY(mcount)
+ stmdb sp!, {lr}
+ ldr lr, [fp, #-4]
+ ldmia sp!, {pc}
+ENDPROC(mcount)
+
+ENTRY(ftrace_caller_old)
stmdb sp!, {r0-r3, lr}
ldr r1, [fp, #-4]
mov r0, lr
sub r0, r0, #MCOUNT_INSN_SIZE
- .globl ftrace_call
-ftrace_call:
+ .globl ftrace_call_old
+ftrace_call_old:
bl ftrace_stub
ldr lr, [fp, #-4] @ restore lr
ldmia sp!, {r0-r3, pc}
+ENDPROC(ftrace_caller_old)
+#endif
#else
@@ -158,7 +188,7 @@
stmdb sp!, {r0-r3, lr}
ldr r0, =ftrace_trace_function
ldr r2, [r0]
- adr r0, ftrace_stub
+ adr r0, .Lftrace_stub
cmp r0, r2
bne gnu_trace
ldmia sp!, {r0-r3, ip, lr}
@@ -168,11 +198,19 @@
ldr r1, [sp, #20] @ lr of instrumented routine
mov r0, lr
sub r0, r0, #MCOUNT_INSN_SIZE
- mov lr, pc
+ adr lr, BSYM(1f)
mov pc, r2
+1:
ldmia sp!, {r0-r3, ip, lr}
mov pc, ip
+ENDPROC(__gnu_mcount_nc)
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * This is under an ifdef in order to force link-time errors for people trying
+ * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
+ * mcount.
+ */
ENTRY(mcount)
stmdb sp!, {r0-r3, lr}
ldr r0, =ftrace_trace_function
@@ -191,12 +229,15 @@
mov pc, r2
ldr lr, [fp, #-4] @ restore lr
ldmia sp!, {r0-r3, pc}
+ENDPROC(mcount)
+#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
- .globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
+.Lftrace_stub:
mov pc, lr
+ENDPROC(ftrace_stub)
#endif /* CONFIG_FUNCTION_TRACER */
@@ -418,11 +459,13 @@
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 33c7077..a48d512 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -30,6 +30,21 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shishkin");
+/*
+ * ETM tracer state
+ */
+struct tracectx {
+ unsigned int etb_bufsz;
+ void __iomem *etb_regs;
+ void __iomem *etm_regs;
+ unsigned long flags;
+ int ncmppairs;
+ int etm_portsz;
+ struct device *dev;
+ struct clk *emu_clk;
+ struct mutex mutex;
+};
+
static struct tracectx tracer;
static inline bool trace_isrunning(struct tracectx *t)
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 0298286..971ac8c 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -2,102 +2,194 @@
* Dynamic function tracing support.
*
* Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
*
* For licencing details, see COPYING.
*
* Defines low-level handling of mcount calls when the kernel
* is compiled with the -pg flag. When using dynamic ftrace, the
- * mcount call-sites get patched lazily with NOP till they are
- * enabled. All code mutation routines here take effect atomically.
+ * mcount call-sites get patched with NOP till they are enabled.
+ * All code mutation routines here are called under stop_machine().
*/
#include <linux/ftrace.h>
+#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.h>
-#define PC_OFFSET 8
-#define BL_OPCODE 0xeb000000
-#define BL_OFFSET_MASK 0x00ffffff
+#ifdef CONFIG_THUMB2_KERNEL
+#define NOP 0xeb04f85d /* pop.w {lr} */
+#else
+#define NOP 0xe8bd4000 /* pop {lr} */
+#endif
-static unsigned long bl_insn;
-static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
+#ifdef CONFIG_OLD_MCOUNT
+#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
+#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-unsigned char *ftrace_nop_replace(void)
+#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
+
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
- return (char *)&NOP;
+ return rec->arch.old_mcount ? OLD_NOP : NOP;
}
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+{
+ if (!rec->arch.old_mcount)
+ return addr;
+
+ if (addr == MCOUNT_ADDR)
+ addr = OLD_MCOUNT_ADDR;
+ else if (addr == FTRACE_ADDR)
+ addr = OLD_FTRACE_ADDR;
+
+ return addr;
+}
+#else
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+ return NOP;
+}
+
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+{
+ return addr;
+}
+#endif
+
/* construct a branch (BL) instruction to addr */
-unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+#ifdef CONFIG_THUMB2_KERNEL
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+ unsigned long s, j1, j2, i1, i2, imm10, imm11;
+ unsigned long first, second;
+ long offset;
+
+ offset = (long)addr - (long)(pc + 4);
+ if (offset < -16777216 || offset > 16777214) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ s = (offset >> 24) & 0x1;
+ i1 = (offset >> 23) & 0x1;
+ i2 = (offset >> 22) & 0x1;
+ imm10 = (offset >> 12) & 0x3ff;
+ imm11 = (offset >> 1) & 0x7ff;
+
+ j1 = (!i1) ^ s;
+ j2 = (!i2) ^ s;
+
+ first = 0xf000 | (s << 10) | imm10;
+ second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
+
+ return (second << 16) | first;
+}
+#else
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
long offset;
- offset = (long)addr - (long)(pc + PC_OFFSET);
+ offset = (long)addr - (long)(pc + 8);
if (unlikely(offset < -33554432 || offset > 33554428)) {
/* Can't generate branches that far (from ARM ARM). Ftrace
* doesn't generate branches outside of kernel text.
*/
WARN_ON_ONCE(1);
- return NULL;
+ return 0;
}
- offset = (offset >> 2) & BL_OFFSET_MASK;
- bl_insn = BL_OPCODE | offset;
- return (unsigned char *)&bl_insn;
+
+ offset = (offset >> 2) & 0x00ffffff;
+
+ return 0xeb000000 | offset;
}
+#endif
-int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
- unsigned char *new_code)
+static int ftrace_modify_code(unsigned long pc, unsigned long old,
+ unsigned long new)
{
- unsigned long err = 0, replaced = 0, old, new;
+ unsigned long replaced;
- old = *(unsigned long *)old_code;
- new = *(unsigned long *)new_code;
+ if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+ return -EFAULT;
- __asm__ __volatile__ (
- "1: ldr %1, [%2] \n"
- " cmp %1, %4 \n"
- "2: streq %3, [%2] \n"
- " cmpne %1, %3 \n"
- " movne %0, #2 \n"
- "3:\n"
+ if (replaced != old)
+ return -EINVAL;
- ".pushsection .fixup, \"ax\"\n"
- "4: mov %0, #1 \n"
- " b 3b \n"
- ".popsection\n"
+ if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
+ return -EPERM;
- ".pushsection __ex_table, \"a\"\n"
- " .long 1b, 4b \n"
- " .long 2b, 4b \n"
- ".popsection\n"
+ flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
- : "=r"(err), "=r"(replaced)
- : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
- : "memory");
-
- if (!err && (replaced == old))
- flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
-
- return err;
+ return 0;
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
- int ret;
unsigned long pc, old;
- unsigned char *new;
+ unsigned long new;
+ int ret;
pc = (unsigned long)&ftrace_call;
memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(pc, (unsigned long)func);
- ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
+
+ ret = ftrace_modify_code(pc, old, new);
+
+#ifdef CONFIG_OLD_MCOUNT
+ if (!ret) {
+ pc = (unsigned long)&ftrace_call_old;
+ memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(pc, (unsigned long)func);
+
+ ret = ftrace_modify_code(pc, old, new);
+ }
+#endif
+
return ret;
}
-/* run from ftrace_init with irqs disabled */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long new, old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_nop_replace(rec);
+ new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ unsigned long old;
+ unsigned long new;
+ int ret;
+
+ old = ftrace_call_replace(ip, adjust_address(rec, addr));
+ new = ftrace_nop_replace(rec);
+ ret = ftrace_modify_code(ip, old, new);
+
+#ifdef CONFIG_OLD_MCOUNT
+ if (ret == -EINVAL && addr == MCOUNT_ADDR) {
+ rec->arch.old_mcount = true;
+
+ old = ftrace_call_replace(ip, adjust_address(rec, addr));
+ new = ftrace_nop_replace(rec);
+ ret = ftrace_modify_code(ip, old, new);
+ }
+#endif
+
+ return ret;
+}
+
int __init ftrace_dyn_arch_init(void *data)
{
- ftrace_mcount_set(data);
+ *(unsigned long *)data = 0;
+
return 0;
}
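ftrace_call_replace() above rebuilds the BL instruction by hand: in ARM mode the branch offset is taken relative to pc+8, shifted right by two and packed into the low 24 bits of 0xeb000000. A quick standalone check of that arithmetic (addresses are made up; only the ARM encoding is reproduced here, not the Thumb-2 variant):

/* Standalone check of the ARM-mode BL encoding used by ftrace_call_replace().
 * The call-site and target addresses are invented for the example. */
#include <stdio.h>

static unsigned long arm_bl(unsigned long pc, unsigned long addr)
{
	long offset = (long)addr - (long)(pc + 8);	/* ARM: pc reads as pc+8 */

	return 0xeb000000 | ((offset >> 2) & 0x00ffffff);
}

int main(void)
{
	/* forward branch: offset = 8, >>2 = 2 */
	printf("0x%08lx\n", arm_bl(0xc0008000, 0xc0008010));	/* 0xeb000002 */
	/* backward branch: offset = -0x18, >>2 = -6, masked -> 0xfffffa */
	printf("0x%08lx\n", arm_bl(0xc0008010, 0xc0008000));	/* 0xebfffffa */
	return 0;
}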
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index b9505aa..58a3e63 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -20,7 +20,7 @@
__switch_data:
.long __mmap_switched
.long __data_loc @ r4
- .long _data @ r5
+ .long _sdata @ r5
.long __bss_start @ r6
.long _end @ r7
.long processor_id @ r4
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index eb62bf9..b44d21e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -86,6 +86,9 @@
movs r8, r5 @ invalid machine (r5=0)?
beq __error_a @ yes, error 'a'
bl __vet_atags
+#ifdef CONFIG_SMP_ON_UP
+ bl __fixup_smp
+#endif
bl __create_page_tables
/*
@@ -333,4 +336,51 @@
ENDPROC(__create_page_tables)
.ltorg
+#ifdef CONFIG_SMP_ON_UP
+__fixup_smp:
+ mov r7, #0x00070000
+ orr r6, r7, #0xff000000 @ mask 0xff070000
+ orr r7, r7, #0x41000000 @ val 0x41070000
+ and r0, r9, r6
+ teq r0, r7 @ ARM CPU and ARMv6/v7?
+ bne __fixup_smp_on_up @ no, assume UP
+
+ orr r6, r6, #0x0000ff00
+ orr r6, r6, #0x000000f0 @ mask 0xff07fff0
+ orr r7, r7, #0x0000b000
+ orr r7, r7, #0x00000020 @ val 0x4107b020
+ and r0, r9, r6
+ teq r0, r7 @ ARM 11MPCore?
+ moveq pc, lr @ yes, assume SMP
+
+ mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
+ tst r0, #1 << 31
+ movne pc, lr @ bit 31 => SMP
+
+__fixup_smp_on_up:
+ adr r0, 1f
+ ldmia r0, {r3, r6, r7}
+ sub r3, r0, r3
+ add r6, r6, r3
+ add r7, r7, r3
+2: cmp r6, r7
+ ldmia r6!, {r0, r4}
+ strlo r4, [r0, r3]
+ blo 2b
+ mov pc, lr
+ENDPROC(__fixup_smp)
+
+1: .word .
+ .word __smpalt_begin
+ .word __smpalt_end
+
+ .pushsection .data
+ .globl smp_on_up
+smp_on_up:
+ ALT_SMP(.long 1)
+ ALT_UP(.long 0)
+ .popsection
+
+#endif
+
#include "head-common.S"
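__fixup_smp_on_up above walks the .alt.smp.init table emitted by the ALT_SMP()/ALT_UP() macros (and kept or discarded by the linker-script change further down): each entry is a pair of words, the address of an SMP-only instruction and the UP replacement to write over it. Roughly, in C terms, the loop amounts to the sketch below; this is a conceptual model only, and it ignores the load-address vs. link-address delta the assembly also applies, with placeholder values instead of real instruction encodings:

/* Conceptual C version of the __fixup_smp_on_up patch loop. */
#include <stdint.h>
#include <stdio.h>

struct alt_smp_entry {
	uint32_t *insn_addr;	/* location of the SMP-only instruction */
	uint32_t  up_insn;	/* replacement encoding for UP          */
};

/* __smpalt_begin/__smpalt_end bound the .alt.smp.init table in the image */
static void fixup_smp_on_up(struct alt_smp_entry *begin,
			    struct alt_smp_entry *end)
{
	struct alt_smp_entry *e;

	for (e = begin; e < end; e++)
		*e->insn_addr = e->up_insn;	/* overwrite in place */
}

int main(void)
{
	uint32_t text[1] = { 0x11111111 };	/* placeholder "SMP-only" word  */
	struct alt_smp_entry table[] = {
		{ &text[0], 0x22222222 },	/* placeholder "UP" replacement */
	};

	fixup_smp_on_up(table, table + 1);
	printf("patched word: 0x%08x\n", (unsigned int)text[0]);
	return 0;
}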
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 6b46058..d9bd786 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -69,20 +69,31 @@
{
#ifdef CONFIG_ARM_UNWIND
Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+ struct arm_unwind_mapping *maps = mod->arch.map;
for (s = sechdrs; s < sechdrs_end; s++) {
- if (strcmp(".ARM.exidx.init.text", secstrings + s->sh_name) == 0)
- mod->arch.unw_sec_init = s;
- else if (strcmp(".ARM.exidx.devinit.text", secstrings + s->sh_name) == 0)
- mod->arch.unw_sec_devinit = s;
- else if (strcmp(".ARM.exidx", secstrings + s->sh_name) == 0)
- mod->arch.unw_sec_core = s;
- else if (strcmp(".init.text", secstrings + s->sh_name) == 0)
- mod->arch.sec_init_text = s;
- else if (strcmp(".devinit.text", secstrings + s->sh_name) == 0)
- mod->arch.sec_devinit_text = s;
- else if (strcmp(".text", secstrings + s->sh_name) == 0)
- mod->arch.sec_core_text = s;
+ char const *secname = secstrings + s->sh_name;
+
+ if (strcmp(".ARM.exidx.init.text", secname) == 0)
+ maps[ARM_SEC_INIT].unw_sec = s;
+ else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
+ maps[ARM_SEC_DEVINIT].unw_sec = s;
+ else if (strcmp(".ARM.exidx", secname) == 0)
+ maps[ARM_SEC_CORE].unw_sec = s;
+ else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
+ maps[ARM_SEC_EXIT].unw_sec = s;
+ else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
+ maps[ARM_SEC_DEVEXIT].unw_sec = s;
+ else if (strcmp(".init.text", secname) == 0)
+ maps[ARM_SEC_INIT].sec_text = s;
+ else if (strcmp(".devinit.text", secname) == 0)
+ maps[ARM_SEC_DEVINIT].sec_text = s;
+ else if (strcmp(".text", secname) == 0)
+ maps[ARM_SEC_CORE].sec_text = s;
+ else if (strcmp(".exit.text", secname) == 0)
+ maps[ARM_SEC_EXIT].sec_text = s;
+ else if (strcmp(".devexit.text", secname) == 0)
+ maps[ARM_SEC_DEVEXIT].sec_text = s;
}
#endif
return 0;
@@ -292,31 +303,22 @@
#ifdef CONFIG_ARM_UNWIND
static void register_unwind_tables(struct module *mod)
{
- if (mod->arch.unw_sec_init && mod->arch.sec_init_text)
- mod->arch.unwind_init =
- unwind_table_add(mod->arch.unw_sec_init->sh_addr,
- mod->arch.unw_sec_init->sh_size,
- mod->arch.sec_init_text->sh_addr,
- mod->arch.sec_init_text->sh_size);
- if (mod->arch.unw_sec_devinit && mod->arch.sec_devinit_text)
- mod->arch.unwind_devinit =
- unwind_table_add(mod->arch.unw_sec_devinit->sh_addr,
- mod->arch.unw_sec_devinit->sh_size,
- mod->arch.sec_devinit_text->sh_addr,
- mod->arch.sec_devinit_text->sh_size);
- if (mod->arch.unw_sec_core && mod->arch.sec_core_text)
- mod->arch.unwind_core =
- unwind_table_add(mod->arch.unw_sec_core->sh_addr,
- mod->arch.unw_sec_core->sh_size,
- mod->arch.sec_core_text->sh_addr,
- mod->arch.sec_core_text->sh_size);
+ int i;
+ for (i = 0; i < ARM_SEC_MAX; ++i) {
+ struct arm_unwind_mapping *map = &mod->arch.map[i];
+ if (map->unw_sec && map->sec_text)
+ map->unwind = unwind_table_add(map->unw_sec->sh_addr,
+ map->unw_sec->sh_size,
+ map->sec_text->sh_addr,
+ map->sec_text->sh_size);
+ }
}
static void unregister_unwind_tables(struct module *mod)
{
- unwind_table_del(mod->arch.unwind_init);
- unwind_table_del(mod->arch.unwind_devinit);
- unwind_table_del(mod->arch.unwind_core);
+ int i = ARM_SEC_MAX;
+ while (--i >= 0)
+ unwind_table_del(mod->arch.map[i].unwind);
}
#else
static inline void register_unwind_tables(struct module *mod) { }
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392..ecbb028 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -319,8 +319,8 @@
{
struct hw_perf_event fake_event = event->hw;
- if (event->pmu && event->pmu != &pmu)
- return 0;
+ if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+ return 1;
return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}
@@ -1041,8 +1041,8 @@
/*
* Handle the pending perf events.
*
- * Note: this call *must* be run with interrupts enabled. For
- * platforms that can have the PMU interrupts raised as a PMI, this
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
perf_event_do_pending();
@@ -2017,8 +2017,8 @@
/*
* Handle the pending perf events.
*
- * Note: this call *must* be run with interrupts enabled. For
- * platforms that can have the PMU interrupts raised as a PMI, this
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
perf_event_do_pending();
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 974af1c..3af34bf 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -136,6 +136,25 @@
void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
+ * pm_idle and pick up the new one. Required while changing the pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+ smp_mb();
+ /* kick all the CPUs so that they exit out of pm_idle */
+ smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
* This is our default idle handler. We need to disable
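cpu_idle_wait() above only works together with the contract in its comment: the caller installs the new pm_idle handler first, then calls it so that no CPU can still be sitting in the old one afterwards. The sketch below is a userspace model of that ordering; the handler names are invented and the real callers would be kernel code using the declaration added to asm/system.h earlier in this series:

/* Userspace model of the pm_idle switch protocol backed by cpu_idle_wait().
 * Names mirror the kernel ones, but this is an illustration only. */
#include <stdio.h>

static void default_idle(void) { puts("default idle"); }
static void driver_idle(void)  { puts("driver idle");  }

static void (*pm_idle)(void) = default_idle;

static void cpu_idle_wait(void)
{
	/* stand-in for smp_mb() plus kicking every CPU out of the old handler */
	puts("all CPUs nudged out of the old idle handler");
}

int main(void)
{
	pm_idle = driver_idle;	/* 1. install the new handler first ...      */
	cpu_idle_wait();	/* 2. ... then wait out users of the old one */

	pm_idle();		/* from here on, only driver_idle runs */
	return 0;
}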
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d5231ae..336f14e 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -36,6 +36,7 @@
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
@@ -238,6 +239,35 @@
return cpu_arch;
}
+static int cpu_has_aliasing_icache(unsigned int arch)
+{
+ int aliasing_icache;
+ unsigned int id_reg, num_sets, line_size;
+
+ /* arch specifies the register format */
+ switch (arch) {
+ case CPU_ARCH_ARMv7:
+ asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
+ : /* No output operands */
+ : "r" (1));
+ isb();
+ asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
+ : "=r" (id_reg));
+ line_size = 4 << ((id_reg & 0x7) + 2);
+ num_sets = ((id_reg >> 13) & 0x7fff) + 1;
+ aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
+ break;
+ case CPU_ARCH_ARMv6:
+ aliasing_icache = read_cpuid_cachetype() & (1 << 11);
+ break;
+ default:
+ /* I-cache aliases will be handled by D-cache aliasing code */
+ aliasing_icache = 0;
+ }
+
+ return aliasing_icache;
+}
+
static void __init cacheid_init(void)
{
unsigned int cachetype = read_cpuid_cachetype();
@@ -249,10 +279,15 @@
cacheid = CACHEID_VIPT_NONALIASING;
if ((cachetype & (3 << 14)) == 1 << 14)
cacheid |= CACHEID_ASID_TAGGED;
- } else if (cachetype & (1 << 23))
+ else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
+ cacheid |= CACHEID_VIPT_I_ALIASING;
+ } else if (cachetype & (1 << 23)) {
cacheid = CACHEID_VIPT_ALIASING;
- else
+ } else {
cacheid = CACHEID_VIPT_NONALIASING;
+ if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
+ cacheid |= CACHEID_VIPT_I_ALIASING;
+ }
} else {
cacheid = CACHEID_VIVT;
}
@@ -263,7 +298,7 @@
cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
cache_is_vivt() ? "VIVT" :
icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
- cache_is_vipt_aliasing() ? "VIPT aliasing" :
+ icache_is_vipt_aliasing() ? "VIPT aliasing" :
cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
@@ -490,7 +525,7 @@
kernel_code.start = virt_to_phys(_text);
kernel_code.end = virt_to_phys(_etext - 1);
- kernel_data.start = virt_to_phys(_data);
+ kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
for (i = 0; i < mi->nr_banks; i++) {
@@ -825,7 +860,8 @@
request_standard_resources(&meminfo, mdesc);
#ifdef CONFIG_SMP
- smp_init_cpus();
+ if (is_smp())
+ smp_init_cpus();
#endif
reserve_crashkernel();
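For ARMv7, cpu_has_aliasing_icache() above decides whether the I-cache can alias by comparing one way's footprint (line size times number of sets, decoded from CCSIDR after selecting the instruction cache via CSSELR) against PAGE_SIZE. Worked numbers, as a standalone sketch; the cache geometry is an example (32KB, 4-way, 32-byte lines, 4KB pages), not a claim about any particular CPU:

/* Standalone rerun of the CCSIDR arithmetic in cpu_has_aliasing_icache(). */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	/* CCSIDR: LineSize in [2:0], Associativity in [12:3], NumSets in [27:13] */
	unsigned int id_reg = (255u << 13) | (3u << 3) | 1u; /* 256 sets, 4 ways, 32B lines */

	unsigned int line_size = 4u << ((id_reg & 0x7) + 2);	/* 32  */
	unsigned int num_sets  = ((id_reg >> 13) & 0x7fff) + 1;	/* 256 */

	/* one way spans line_size * num_sets bytes; if that exceeds a page,
	 * index bits above the page offset come from the virtual address and
	 * two aliases of the same page can land in different cache lines */
	printf("way size %u bytes -> aliasing: %d\n",
	       line_size * num_sets, line_size * num_sets > PAGE_SIZE); /* 8192, 1 */
	return 0;
}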
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 40dc74f..32e16da 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -567,7 +567,8 @@
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
- send_ipi_message(&mask, IPI_CPU_STOP);
+ if (!cpus_empty(mask))
+ send_ipi_message(&mask, IPI_CPU_STOP);
}
/*
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index dd81a91..2a16176 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -146,6 +146,8 @@
addr < table->end_addr) {
idx = search_index(addr, table->start,
table->stop - 1);
+ /* Move-to-front to exploit common traces */
+ list_move(&table->list, &unwind_tables);
break;
}
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b16c079..065d35d 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -40,6 +40,11 @@
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
+#ifdef CONFIG_SMP_ON_UP
+ __smpalt_begin = .;
+ *(.alt.smp.init)
+ __smpalt_end = .;
+#endif
INIT_SETUP(16)
@@ -104,8 +109,6 @@
RO_DATA(PAGE_SIZE)
- _etext = .; /* End of text and rodata section */
-
#ifdef CONFIG_ARM_UNWIND
/*
* Stack unwinding tables
@@ -123,6 +126,8 @@
}
#endif
+ _etext = .; /* End of text and rodata section */
+
#ifdef CONFIG_XIP_KERNEL
__data_loc = ALIGN(4); /* location in binary */
. = PAGE_OFFSET + TEXT_OFFSET;
@@ -237,6 +242,12 @@
/* Default discards */
DISCARDS
+
+#ifndef CONFIG_SMP_ON_UP
+ /DISCARD/ : {
+ *(.alt.smp.init)
+ }
+#endif
}
/*
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 939bccd..ca33862 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -248,6 +248,12 @@
Select this if you are using a Eukrea Electromatique's
CPU9260 Board <http://www.eukrea.com/>
+config MACH_FLEXIBITY
+ bool "Flexibity Connect board"
+ help
+ Select this if you are using the Flexibity Connect board
+ <http://www.flexibity.com>
+
endif
# ----------------------------------------------------------
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index ca2ac00..7cbe06d 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -46,6 +46,7 @@
obj-$(CONFIG_MACH_QIL_A9260) += board-qil-a9260.o
obj-$(CONFIG_MACH_AFEB9260) += board-afeb-9260v1.o
obj-$(CONFIG_MACH_CPU9260) += board-cpu9krea.o
+obj-$(CONFIG_MACH_FLEXIBITY) += board-flexibity.o
# AT91SAM9261 board-specific support
obj-$(CONFIG_MACH_AT91SAM9261EK) += board-sam9261ek.o
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 753c0d3..c67b47f 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -121,8 +121,8 @@
.pmc_mask = 1 << AT91SAM9G45_ID_SSC1,
.type = CLK_TYPE_PERIPHERAL,
};
-static struct clk tcb_clk = {
- .name = "tcb_clk",
+static struct clk tcb0_clk = {
+ .name = "tcb0_clk",
.pmc_mask = 1 << AT91SAM9G45_ID_TCB,
.type = CLK_TYPE_PERIPHERAL,
};
@@ -192,6 +192,14 @@
.parent = &uhphs_clk,
};
+/* One additional fake clock for second TC block */
+static struct clk tcb1_clk = {
+ .name = "tcb1_clk",
+ .pmc_mask = 0,
+ .type = CLK_TYPE_PERIPHERAL,
+ .parent = &tcb0_clk,
+};
+
static struct clk *periph_clocks[] __initdata = {
&pioA_clk,
&pioB_clk,
@@ -208,7 +216,7 @@
&spi1_clk,
&ssc0_clk,
&ssc1_clk,
- &tcb_clk,
+ &tcb0_clk,
&pwm_clk,
&tsc_clk,
&dma_clk,
@@ -221,6 +229,7 @@
&mmc1_clk,
// irq0
&ohci_clk,
+ &tcb1_clk,
};
/*
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 809114d..1276bab 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -46,7 +46,7 @@
.end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
- [2] = {
+ [1] = {
.start = AT91SAM9G45_ID_DMA,
.end = AT91SAM9G45_ID_DMA,
.flags = IORESOURCE_IRQ,
@@ -426,7 +426,7 @@
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PA21,
.scl_is_open_drain = 1,
- .udelay = 2, /* ~100 kHz */
+ .udelay = 5, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PB11,
.scl_is_open_drain = 1,
- .udelay = 2, /* ~100 kHz */
+ .udelay = 5, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi1_device = {
@@ -835,9 +835,9 @@
static void __init at91_add_device_tc(void)
{
/* this chip has one clock and irq for all six TC channels */
- at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+ at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb0_device);
- at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+ at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb1_device);
}
#else
diff --git a/arch/arm/mach-at91/board-flexibity.c b/arch/arm/mach-at91/board-flexibity.c
new file mode 100644
index 0000000..216c8ca
--- /dev/null
+++ b/arch/arm/mach-at91/board-flexibity.c
@@ -0,0 +1,164 @@
+/*
+ * linux/arch/arm/mach-at91/board-flexibity.c
+ *
+ * Copyright (C) 2010 Flexibity
+ * Copyright (C) 2005 SAN People
+ * Copyright (C) 2006 Atmel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/hardware.h>
+#include <mach/board.h>
+
+#include "generic.h"
+
+static void __init flexibity_map_io(void)
+{
+ /* Initialize processor: 18.432 MHz crystal */
+ at91sam9260_initialize(18432000);
+
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+static void __init flexibity_init_irq(void)
+{
+ at91sam9260_init_interrupts(NULL);
+}
+
+/* USB Host port */
+static struct at91_usbh_data __initdata flexibity_usbh_data = {
+ .ports = 2,
+};
+
+/* USB Device port */
+static struct at91_udc_data __initdata flexibity_udc_data = {
+ .vbus_pin = AT91_PIN_PC5,
+ .pullup_pin = 0, /* pull-up driven by UDC */
+};
+
+/* SPI devices */
+static struct spi_board_info flexibity_spi_devices[] = {
+ { /* DataFlash chip */
+ .modalias = "mtd_dataflash",
+ .chip_select = 1,
+ .max_speed_hz = 15 * 1000 * 1000,
+ .bus_num = 0,
+ },
+};
+
+/* MCI (SD/MMC) */
+static struct at91_mmc_data __initdata flexibity_mmc_data = {
+ .slot_b = 0,
+ .wire4 = 1,
+ .det_pin = AT91_PIN_PC9,
+ .wp_pin = AT91_PIN_PC4,
+};
+
+/* LEDs */
+static struct gpio_led flexibity_leds[] = {
+ {
+ .name = "usb1:green",
+ .gpio = AT91_PIN_PA12,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "usb1:red",
+ .gpio = AT91_PIN_PA13,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "usb2:green",
+ .gpio = AT91_PIN_PB26,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "usb2:red",
+ .gpio = AT91_PIN_PB27,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "usb3:green",
+ .gpio = AT91_PIN_PC8,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "usb3:red",
+ .gpio = AT91_PIN_PC6,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "usb4:green",
+ .gpio = AT91_PIN_PB4,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "usb4:red",
+ .gpio = AT91_PIN_PB5,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ }
+};
+
+static void __init flexibity_board_init(void)
+{
+ /* Serial */
+ at91_add_device_serial();
+ /* USB Host */
+ at91_add_device_usbh(&flexibity_usbh_data);
+ /* USB Device */
+ at91_add_device_udc(&flexibity_udc_data);
+ /* SPI */
+ at91_add_device_spi(flexibity_spi_devices,
+ ARRAY_SIZE(flexibity_spi_devices));
+ /* MMC */
+ at91_add_device_mmc(0, &flexibity_mmc_data);
+ /* LEDs */
+ at91_gpio_leds(flexibity_leds, ARRAY_SIZE(flexibity_leds));
+}
+
+MACHINE_START(FLEXIBITY, "Flexibity Connect")
+ /* Maintainer: Maxim Osipov */
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = flexibity_map_io,
+ .init_irq = flexibity_init_irq,
+ .init_machine = flexibity_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index c4c8865..65eb094 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -93,11 +93,12 @@
.start = AT91_PIN_PC11,
.end = AT91_PIN_PC11,
.flags = IORESOURCE_IRQ
+ | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
}
};
static struct dm9000_plat_data dm9000_platdata = {
- .flags = DM9000_PLATF_16BITONLY,
+ .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
};
static struct platform_device dm9000_device = {
@@ -168,17 +169,6 @@
/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata ek_mmc_data = {
- .wire4 = 1,
-// .det_pin = ... not connected
-// .wp_pin = ... not connected
-// .vcc_pin = ... not connected
-};
-
-
-/*
* NAND flash
*/
static struct mtd_partition __initdata ek_nand_partition[] = {
@@ -246,6 +236,10 @@
at91_add_device_nand(&ek_nand_data);
}
+/*
+ * SPI related devices
+ */
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
/*
* ADS7846 Touchscreen
@@ -356,6 +350,19 @@
#endif
};
+#else /* CONFIG_SPI_ATMEL_* */
+/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata ek_mmc_data = {
+ .wire4 = 1,
+};
+
+#endif /* CONFIG_SPI_ATMEL_* */
+
/*
* LCD Controller
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index 7f7da43..7525cee 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -501,7 +501,8 @@
int __init clk_register(struct clk *clk)
{
if (clk_is_peripheral(clk)) {
- clk->parent = &mck;
+ if (!clk->parent)
+ clk->parent = &mck;
clk->mode = pmc_periph_mode;
list_add_tail(&clk->node, &clocks);
}
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3d996b6..9be261b 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -769,8 +769,7 @@
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- /* MT_MEMORY_NONCACHED requires supersection alignment */
- .type = MT_DEVICE,
+ .type = MT_MEMORY_NONCACHED,
},
};
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6b6f4c6..7781e35 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -969,8 +969,7 @@
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- /* MT_MEMORY_NONCACHED requires supersection alignment */
- .type = MT_DEVICE,
+ .type = MT_MEMORY_NONCACHED,
},
};
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 40fec31..5e5b0a7 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -653,8 +653,7 @@
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00008000),
.length = SZ_16K,
- /* MT_MEMORY_NONCACHED requires supersection alignment */
- .type = MT_DEVICE,
+ .type = MT_MEMORY_NONCACHED,
},
};
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index e4a3df1..26e8a9c 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -737,8 +737,7 @@
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- /* MT_MEMORY_NONCACHED requires supersection alignment */
- .type = MT_DEVICE,
+ .type = MT_MEMORY_NONCACHED,
},
};
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index 3b3e472..eb4936f 100644
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -13,8 +13,8 @@
#define IO_SPACE_LIMIT 0xffffffff
-#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
- DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a) (a)
+#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
+ DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 8bf3cec..4566bd1 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -560,4 +560,4 @@
clkdev_add_table(clocks, ARRAY_SIZE(clocks));
return 0;
}
-arch_initcall(ep93xx_clock_init);
+postcore_initcall(ep93xx_clock_init);
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 61cd4d6..24498a9 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -503,6 +503,14 @@
return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
}
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (mask >= SZ_64M - 1)
+ return 0;
+
+ return -EIO;
+}
+
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index f91ca6d..8138371 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -26,6 +26,8 @@
#define PCIBIOS_MAX_MEM 0x4BFFFFFF
#endif
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
+
#define pcibios_assign_all_busses() 1
/* Register locations and bits */
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 93fc2ec..6e924b3 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -38,7 +38,7 @@
#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 55e7f00..513ad31 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -117,7 +117,7 @@
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 0 I/O Space";
- pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+ pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
@@ -139,7 +139,7 @@
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 1 I/O Space";
- pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+ pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 4f5b0e0..1a8a25e 100644
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,6 +9,8 @@
#ifndef __ASM_MACH_SYSTEM_H
#define __ASM_MACH_SYSTEM_H
+#include <mach/cputype.h>
+
static inline void arch_idle(void)
{
cpu_do_idle();
@@ -16,6 +18,9 @@
static inline void arch_reset(char mode, const char *cmd)
{
- cpu_reset(0);
+ if (cpu_is_pxa168())
+ cpu_reset(0xffff0000);
+ else
+ cpu_reset(0);
}
#endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
index 91931dc..4aaadc7 100644
--- a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
@@ -215,7 +215,7 @@
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd25_baseboard_init(void)
{
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c
index a5f0174..e064bb3 100644
--- a/arch/arm/mach-mx25/mach-cpuimx25.c
+++ b/arch/arm/mach-mx25/mach-cpuimx25.c
@@ -147,8 +147,8 @@
if (!otg_mode_host)
mxc_register_device(&otg_udc_device, &otg_device_pdata);
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
- eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
+ eukrea_mbimxsd25_baseboard_init();
#endif
}
diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c
index d3af0fd..7a62e74 100644
--- a/arch/arm/mach-mx3/clock-imx35.c
+++ b/arch/arm/mach-mx3/clock-imx35.c
@@ -155,7 +155,7 @@
aad = &clk_consumer[(pdr0 >> 16) & 0xf];
if (aad->sel)
- fref = fref * 2 / 3;
+ fref = fref * 3 / 4;
return fref / aad->arm;
}
@@ -164,7 +164,7 @@
{
unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
struct arm_ahb_div *aad;
- unsigned long fref = get_rate_mpll();
+ unsigned long fref = get_rate_arm();
aad = &clk_consumer[(pdr0 >> 16) & 0xf];
@@ -176,16 +176,11 @@
return get_rate_ahb(NULL) >> 1;
}
-static unsigned long get_3_3_div(unsigned long in)
-{
- return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
-}
-
static unsigned long get_rate_uart(struct clk *clk)
{
unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
- unsigned long div = get_3_3_div(pdr4 >> 10);
+ unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
if (pdr3 & (1 << 14))
return get_rate_arm() / div;
@@ -216,7 +211,7 @@
break;
}
- return rate / get_3_3_div(div);
+ return rate / (div + 1);
}
static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@
else
rate = get_rate_ppll();
- return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
+ return rate / (((pdr2 >> 16) & 0x3f) + 1);
}
static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@
else
rate = get_rate_ppll();
- return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
+ return rate / (((pdr4 >> 22) & 0x3f) + 1);
}
static unsigned long get_rate_ipg_per(struct clk *clk)
{
unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
- unsigned long div1, div2;
+ unsigned long div;
if (pdr0 & (1 << 26)) {
- div1 = (pdr4 >> 19) & 0x7;
- div2 = (pdr4 >> 16) & 0x7;
- return get_rate_arm() / ((div1 + 1) * (div2 + 1));
+ div = (pdr4 >> 16) & 0x3f;
+ return get_rate_arm() / (div + 1);
} else {
- div1 = (pdr0 >> 12) & 0x7;
- return get_rate_ahb(NULL) / div1;
+ div = (pdr0 >> 12) & 0x7;
+ return get_rate_ahb(NULL) / (div + 1);
}
}
+static unsigned long get_rate_hsp(struct clk *clk)
+{
+ unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
+ unsigned long fref = get_rate_mpll();
+
+ if (fref > 400 * 1000 * 1000) {
+ switch (hsp_podf) {
+ case 0:
+ return fref >> 2;
+ case 1:
+ return fref >> 3;
+ case 2:
+ return fref / 3;
+ }
+ } else {
+ switch (hsp_podf) {
+ case 0:
+ case 2:
+ return fref / 3;
+ case 1:
+ return fref / 6;
+ }
+ }
+
+ return 0;
+}
+
static int clk_cgr_enable(struct clk *clk)
{
u32 reg;
@@ -359,7 +380,7 @@
DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL);
+DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL);
DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL);
DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL);
DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@
int __init mx35_clocks_init()
{
- unsigned int ll = 0;
+ unsigned int cgr2 = 3 << 26, cgr3 = 0;
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- ll = (3 << 16);
+ cgr2 |= 3 << 16;
#endif
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@
__raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
CCM_BASE + CCM_CGR1);
- __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
- __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+ /*
+ * Check if we came up in internal boot mode. If yes, we need some
+ * extra clocks turned on, otherwise the MX35 boot ROM code will
+ * hang after a watchdog reset.
+ */
+ if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
+ /* Additionally turn on UART1, SCC, and IIM clocks */
+ cgr2 |= 3 << 16 | 3 << 4;
+ cgr3 |= 3 << 2;
+ }
+
+ __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+ __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
mxc_timer_init(&gpt_clk,
MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
index 1dc5004..f8f15e3 100644
--- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
@@ -216,7 +216,7 @@
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd35_baseboard_init(void)
{
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c
index 9770a6a..2a4f8b7 100644
--- a/arch/arm/mach-mx3/mach-cpuimx35.c
+++ b/arch/arm/mach-mx3/mach-cpuimx35.c
@@ -201,8 +201,8 @@
if (!otg_mode_host)
mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
- eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
+ eukrea_mbimxsd35_baseboard_init();
#endif
}
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index 6af69de..57c10a9 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -56,7 +56,7 @@
{
u32 reg;
reg = __raw_readl(clk->enable_reg);
- reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
+ reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
__raw_writel(reg, clk->enable_reg);
}
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 268a9bc..58093d9 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -312,8 +312,7 @@
freqs.cpu = policy->cpu;
if (freq_debug)
- pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
- "(SDRAM %d Mhz)\n",
+ pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
@@ -398,7 +397,7 @@
return 0;
}
-static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
{
int i;
unsigned int freq;
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 27fa329..0a0d0fe 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -204,7 +204,7 @@
return 0;
}
-static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
{
int ret = -EINVAL;
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 7f64d24..814f145 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -264,23 +264,35 @@
* <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
* == 0x3 for pxa300/pxa310/pxa320
*/
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
#define __cpu_is_pxa2xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id <= 0x2; \
})
+#else
+#define __cpu_is_pxa2xx(id) (0)
+#endif
+#ifdef CONFIG_PXA3xx
#define __cpu_is_pxa3xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id == 0x3; \
})
+#else
+#define __cpu_is_pxa3xx(id) (0)
+#endif
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
#define __cpu_is_pxa93x(id) \
({ \
unsigned int _id = (id) >> 4 & 0xfff; \
_id == 0x683 || _id == 0x693; \
})
+#else
+#define __cpu_is_pxa93x(id) (0)
+#endif
#define cpu_is_pxa2xx() \
({ \
@@ -309,7 +321,7 @@
#define PCIBIOS_MIN_IO 0
#define PCIBIOS_MIN_MEM 0
#define pcibios_assign_all_busses() 1
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
#endif
-
#endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
index 262691f..fdca3be 100644
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ b/arch/arm/mach-pxa/include/mach/io.h
@@ -6,6 +6,8 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
+#include <mach/hardware.h>
+
#define IO_SPACE_LIMIT 0xffffffff
/*
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
index 7139e0d..4e12870 100644
--- a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
+++ b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
@@ -71,10 +71,10 @@
#define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X)
#define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X)
#define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X)
-#define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
-#define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
#define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X)
#define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X)
+#define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
+#define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
/* KEYPAD */
#define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT)
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 77ad6d3..405b92a 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -469,9 +469,13 @@
},
};
+static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
+ .use_pio = 1,
+};
+
void __init palm27x_pmic_init(void)
{
i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
- pxa27x_set_i2c_power_info(NULL);
+ pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
}
#endif
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index c9b747c..37d6173 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -240,6 +240,7 @@
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data vpac270_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_power = -1,
.gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
.gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
.detect_delay_ms = 200,
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 2fa38df..07c0815 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -259,6 +259,7 @@
.status = realview_mmc_status,
.gpio_wp = 17,
.gpio_cd = 16,
+ .cd_invert = true,
};
struct mmci_platform_data realview_mmc1_plat_data = {
@@ -266,6 +267,7 @@
.status = realview_mmc_status,
.gpio_wp = 19,
.gpio_cd = 18,
+ .cd_invert = true,
};
/*
diff --git a/arch/arm/mach-realview/include/mach/smp.h b/arch/arm/mach-realview/include/mach/smp.h
index dd53892..d3cd265 100644
--- a/arch/arm/mach-realview/include/mach/smp.h
+++ b/arch/arm/mach-realview/include/mach/smp.h
@@ -1,16 +1,8 @@
#ifndef ASMARM_ARCH_SMP_H
#define ASMARM_ARCH_SMP_H
-
#include <asm/hardware/gic.h>
-
-#define hard_smp_processor_id() \
- ({ \
- unsigned int cpunum; \
- __asm__("mrc p15, 0, %0, c0, c0, 5" \
- : "=r" (cpunum)); \
- cpunum &= 0x0F; \
- })
+#include <asm/smp_mpidr.h>
/*
* We use IRQ1 as the IPI
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index a492b98..405e621 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -18,10 +18,11 @@
#include <mach/map.h>
#include <mach/gpio-bank-c.h>
#include <mach/spi-clocks.h>
+#include <mach/irqs.h>
#include <plat/s3c64xx-spi.h>
#include <plat/gpio-cfg.h>
-#include <plat/irqs.h>
+#include <plat/devs.h>
static char *spi_src_clks[] = {
[S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 5c07d01..e130379 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -30,73 +30,73 @@
#include <plat/devs.h>
#include <plat/regs-serial.h>
-#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
-#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
-#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
+#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
[0] = {
- .hwport = 0,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[1] = {
- .hwport = 1,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[2] = {
- .hwport = 2,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[3] = {
- .hwport = 3,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 3,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
};
/* DM9000AEP 10/100 ethernet controller */
static struct resource real6410_dm9k_resource[] = {
- [0] = {
- .start = S3C64XX_PA_XM0CSN1,
- .end = S3C64XX_PA_XM0CSN1 + 1,
- .flags = IORESOURCE_MEM
- },
- [1] = {
- .start = S3C64XX_PA_XM0CSN1 + 4,
- .end = S3C64XX_PA_XM0CSN1 + 5,
- .flags = IORESOURCE_MEM
- },
- [2] = {
- .start = S3C_EINT(7),
- .end = S3C_EINT(7),
- .flags = IORESOURCE_IRQ,
- }
+ [0] = {
+ .start = S3C64XX_PA_XM0CSN1,
+ .end = S3C64XX_PA_XM0CSN1 + 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = S3C64XX_PA_XM0CSN1 + 4,
+ .end = S3C64XX_PA_XM0CSN1 + 5,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = S3C_EINT(7),
+ .end = S3C_EINT(7),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
+ }
};
static struct dm9000_plat_data real6410_dm9k_pdata = {
- .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+ .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
};
static struct platform_device real6410_device_eth = {
- .name = "dm9000",
- .id = -1,
- .num_resources = ARRAY_SIZE(real6410_dm9k_resource),
- .resource = real6410_dm9k_resource,
- .dev = {
- .platform_data = &real6410_dm9k_pdata,
- },
+ .name = "dm9000",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(real6410_dm9k_resource),
+ .resource = real6410_dm9k_resource,
+ .dev = {
+ .platform_data = &real6410_dm9k_pdata,
+ },
};
static struct platform_device *real6410_devices[] __initdata = {
@@ -129,12 +129,12 @@
/* set timing for nCS1 suitable for ethernet chip */
__raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
- (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
- (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
- (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
- (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
- (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
- (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
+ (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
+ (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
+ (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
+ (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
+ (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
+ (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
}
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index 3a9639b..cb1ebeb 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -136,7 +136,7 @@
.dev.platform_data = &smartq_usb_otg_vbus_pdata,
};
-static int __init smartq_bl_init(struct device *dev)
+static int smartq_bl_init(struct device *dev)
{
s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2));
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index a4d59b0..235e439 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -32,7 +32,7 @@
#include "mach-smartq.h"
-static struct gpio_led smartq5_leds[] __initdata = {
+static struct gpio_led smartq5_leds[] = {
{
.name = "smartq5:green",
.active_low = 1,
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index e50a7d7..78a58c3 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -32,7 +32,7 @@
#include "mach-smartq.h"
-static struct gpio_led smartq7_leds[] __initdata = {
+static struct gpio_led smartq7_leds[] = {
{
.name = "smartq7:red",
.active_low = 1,
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index af91fef..cfecd70 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -281,6 +281,24 @@
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1<<29),
}, {
+ .name = "fimc",
+ .id = 0,
+ .parent = &clk_hclk_dsys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 24),
+ }, {
+ .name = "fimc",
+ .id = 1,
+ .parent = &clk_hclk_dsys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 25),
+ }, {
+ .name = "fimc",
+ .id = 2,
+ .parent = &clk_hclk_dsys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 26),
+ }, {
.name = "otg",
.id = -1,
.parent = &clk_hclk_psys.clk,
@@ -357,7 +375,7 @@
.id = 1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1<<8),
+ .ctrlbit = (1 << 10),
}, {
.name = "i2c",
.id = 2,
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index b9f4d67..77f456c 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -47,7 +47,7 @@
{
.virtual = (unsigned long)S5P_VA_SYSTIMER,
.pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER),
- .length = SZ_1M,
+ .length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)VA_VIC2,
diff --git a/arch/arm/mach-s5pv310/include/mach/smp.h b/arch/arm/mach-s5pv310/include/mach/smp.h
index 990f3ba..b7ec252 100644
--- a/arch/arm/mach-s5pv310/include/mach/smp.h
+++ b/arch/arm/mach-s5pv310/include/mach/smp.h
@@ -7,17 +7,10 @@
#define ASM_ARCH_SMP_H __FILE__
#include <asm/hardware/gic.h>
+#include <asm/smp_mpidr.h>
extern void __iomem *gic_cpu_base_addr;
-#define hard_smp_processor_id() \
- ({ \
- unsigned int cpunum; \
- __asm__("mrc p15, 0, %0, c0, c0, 5" \
- : "=r" (cpunum)); \
- cpunum &= 0x03; \
- })
-
/*
* We use IRQ1 as the IPI
*/
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 5e16b4c..ae416fe 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
#
# Common objects
-obj-y := timer.o console.o clock.o
+obj-y := timer.o console.o clock.o pm_runtime.o
# CPU objects
obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 23d472f..95935c8 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
#include <linux/sh_clk.h>
#include <linux/gpio.h>
#include <linux/input.h>
+#include <linux/leds.h>
#include <linux/input/sh_keysc.h>
#include <linux/usb/r8a66597.h>
@@ -307,6 +309,7 @@
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_ocr_mask = MMC_VDD_165_195,
+ .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
};
static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@
static struct platform_device fsi_device = {
.name = "sh_fsi2",
- .id = 0,
+ .id = -1,
.num_resources = ARRAY_SIZE(fsi_resources),
.resource = fsi_resources,
.dev = {
@@ -650,7 +653,44 @@
},
};
+static struct gpio_led ap4evb_leds[] = {
+ {
+ .name = "led4",
+ .gpio = GPIO_PORT185,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ },
+ {
+ .name = "led2",
+ .gpio = GPIO_PORT186,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ },
+ {
+ .name = "led3",
+ .gpio = GPIO_PORT187,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ },
+ {
+ .name = "led1",
+ .gpio = GPIO_PORT188,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ }
+};
+
+static struct gpio_led_platform_data ap4evb_leds_pdata = {
+ .num_leds = ARRAY_SIZE(ap4evb_leds),
+ .leds = ap4evb_leds,
+};
+
+static struct platform_device leds_device = {
+ .name = "leds-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &ap4evb_leds_pdata,
+ },
+};
+
static struct platform_device *ap4evb_devices[] __initdata = {
+ &leds_device,
&nor_flash_device,
&smc911x_device,
&sdhi0_device,
@@ -840,20 +880,6 @@
gpio_request(GPIO_FN_CS5A, NULL);
gpio_request(GPIO_FN_IRQ6_39, NULL);
- /* enable LED 1 - 4 */
- gpio_request(GPIO_PORT185, NULL);
- gpio_request(GPIO_PORT186, NULL);
- gpio_request(GPIO_PORT187, NULL);
- gpio_request(GPIO_PORT188, NULL);
- gpio_direction_output(GPIO_PORT185, 1);
- gpio_direction_output(GPIO_PORT186, 1);
- gpio_direction_output(GPIO_PORT187, 1);
- gpio_direction_output(GPIO_PORT188, 1);
- gpio_export(GPIO_PORT185, 0);
- gpio_export(GPIO_PORT186, 0);
- gpio_export(GPIO_PORT187, 0);
- gpio_export(GPIO_PORT188, 0);
-
/* enable Debug switch (S6) */
gpio_request(GPIO_PORT32, NULL);
gpio_request(GPIO_PORT33, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index fb4e9b1..7594689 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -286,7 +286,6 @@
struct clk pllc2_clk = {
.ops = &pllc2_clk_ops,
- .flags = CLK_ENABLE_ON_INIT,
.parent = &extal1_div2_clk,
.freq_table = pllc2_freq_table,
.parent_table = pllc2_parent,
@@ -395,7 +394,7 @@
enum { MSTP001,
MSTP131, MSTP130,
- MSTP129, MSTP128,
+ MSTP129, MSTP128, MSTP127, MSTP126,
MSTP118, MSTP117, MSTP116,
MSTP106, MSTP101, MSTP100,
MSTP223,
@@ -413,6 +412,8 @@
[MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
+ [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
+ [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
[MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
[MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@
[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
- [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
+ [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
[MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@
CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
+ CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
diff --git a/arch/arm/mach-shmobile/clock.c b/arch/arm/mach-shmobile/clock.c
index b7c705a..6b7c7c4 100644
--- a/arch/arm/mach-shmobile/clock.c
+++ b/arch/arm/mach-shmobile/clock.c
@@ -1,8 +1,10 @@
/*
- * SH-Mobile Timer
+ * SH-Mobile Clock Framework
*
* Copyright (C) 2010 Magnus Damm
*
+ * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644
index 0000000..94912d3
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -0,0 +1,169 @@
+/*
+ * arch/arm/mach-shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile ARM
+ *
+ * Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_CLK_ENABLED 2
+
+struct pm_runtime_data {
+ unsigned long flags;
+ struct clk *clk;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+ struct pm_runtime_data *prd = res;
+
+ dev_dbg(dev, "__devres_release()\n");
+
+ if (test_bit(BIT_CLK_ENABLED, &prd->flags))
+ clk_disable(prd->clk);
+
+ if (test_bit(BIT_ACTIVE, &prd->flags))
+ clk_put(prd->clk);
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+ return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+ prd->clk = clk_get(dev, NULL);
+ if (!IS_ERR(prd->clk)) {
+ set_bit(BIT_ACTIVE, &prd->flags);
+ dev_info(dev, "clocks managed by runtime pm\n");
+ }
+ }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+ dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_dbg(dev, "platform_pm_runtime_suspend()\n");
+
+ platform_pm_runtime_bug(dev, prd);
+
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ clk_disable(prd->clk);
+ clear_bit(BIT_CLK_ENABLED, &prd->flags);
+ }
+
+ return 0;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_dbg(dev, "platform_pm_runtime_resume()\n");
+
+ platform_pm_runtime_init(dev, prd);
+
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ clk_enable(prd->clk);
+ set_bit(BIT_CLK_ENABLED, &prd->flags);
+ }
+
+ return 0;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+ /* suspend synchronously to disable clocks immediately */
+ return pm_runtime_suspend(dev);
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pm_runtime_data *prd;
+
+ dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+ if (action == BUS_NOTIFY_BIND_DRIVER) {
+ prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+ if (prd)
+ devres_add(dev, prd);
+ else
+ dev_err(dev, "unable to alloc memory for runtime pm\n");
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+static int platform_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct clk *clk;
+
+ dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ clk = clk_get(dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_enable(clk);
+ clk_put(clk);
+ dev_info(dev, "runtime pm disabled, clock forced on\n");
+ }
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ clk = clk_get(dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable(clk);
+ clk_put(clk);
+ dev_info(dev, "runtime pm disabled, clock forced off\n");
+ }
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct notifier_block platform_bus_notifier = {
+ .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+ return 0;
+}
+core_initcall(sh_pm_runtime_init);
diff --git a/arch/arm/mach-tegra/include/mach/smp.h b/arch/arm/mach-tegra/include/mach/smp.h
index 8b42dab..e4a34a3 100644
--- a/arch/arm/mach-tegra/include/mach/smp.h
+++ b/arch/arm/mach-tegra/include/mach/smp.h
@@ -1,16 +1,8 @@
#ifndef ASMARM_ARCH_SMP_H
#define ASMARM_ARCH_SMP_H
-
#include <asm/hardware/gic.h>
-
-#define hard_smp_processor_id() \
- ({ \
- unsigned int cpunum; \
- __asm__("mrc p15, 0, %0, c0, c0, 5" \
- : "=r" (cpunum)); \
- cpunum &= 0x0F; \
- })
+#include <asm/smp_mpidr.h>
/*
* We use IRQ1 as the IPI
diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h
index 7b1fc98..d5a71ab 100644
--- a/arch/arm/mach-u300/include/mach/gpio.h
+++ b/arch/arm/mach-u300/include/mach/gpio.h
@@ -273,6 +273,9 @@
extern int gpio_get_value(unsigned gpio);
extern void gpio_set_value(unsigned gpio, int value);
+#define gpio_get_value_cansleep gpio_get_value
+#define gpio_set_value_cansleep gpio_set_value
+
/* wrappers to sleep-enable the previous two functions */
static inline unsigned gpio_to_irq(unsigned gpio)
{
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 6625e5b..2dd44a0 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -21,9 +21,7 @@
bool "U8500 Development platform"
select UX500_SOC_DB8500
help
- Include support for mop500 development platform
- based on U8500 architecture. The platform is based
- on early drop silicon version of 8500.
+ Include support for the mop500 development platform.
config MACH_U5500
bool "U5500 Development platform"
@@ -39,4 +37,18 @@
Choose the UART on which kernel low-level debug messages should be
output.
+config U5500_MODEM_IRQ
+ bool "Modem IRQ support"
+ depends on MACH_U5500
+ default y
+ help
+ Add support for handling IRQs from the modem side
+
+config U5500_MBOX
+ bool "Mailbox support"
+ depends on MACH_U5500 && U5500_MODEM_IRQ
+ default y
+ help
+ Add support for U5500 mailbox communication with the modem side
+
endif
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 4556aea..9e27a84 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -4,8 +4,12 @@
obj-y := clock.o cpu.o devices.o
obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o devices-db5500.o
-obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
-obj-$(CONFIG_MACH_U8500_MOP) += board-mop500.o
+obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o prcmu.o
+obj-$(CONFIG_MACH_U8500_MOP) += board-mop500.o board-mop500-sdi.o
obj-$(CONFIG_MACH_U5500) += board-u5500.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
+obj-$(CONFIG_REGULATOR_AB8500) += board-mop500-regulators.o
+obj-$(CONFIG_U5500_MODEM_IRQ) += modem_irq.o
+obj-$(CONFIG_U5500_MBOX) += mbox.o
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
new file mode 100644
index 0000000..1187f1f
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com>
+ *
+ * MOP500 board specific initialization for regulators
+ */
+#include <linux/kernel.h>
+#include <linux/regulator/machine.h>
+
+/* supplies to the display/camera */
+static struct regulator_init_data ab8500_vaux1_regulator = {
+ .constraints = {
+ .name = "V-DISPLAY",
+ .min_uV = 2500000,
+ .max_uV = 2900000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
+ REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supplies to the on-board eMMC */
+static struct regulator_init_data ab8500_vaux2_regulator = {
+ .constraints = {
+ .name = "V-eMMC1",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
+ REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supply for VAUX3, supplies to SDcard slots */
+static struct regulator_init_data ab8500_vaux3_regulator = {
+ .constraints = {
+ .name = "V-MMC-SD",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
+ REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supply for tvout, gpadc, TVOUT LDO */
+static struct regulator_init_data ab8500_vtvout_init = {
+ .constraints = {
+ .name = "V-TVOUT",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supply for ab8500-vaudio, VAUDIO LDO */
+static struct regulator_init_data ab8500_vaudio_init = {
+ .constraints = {
+ .name = "V-AUD",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supply for v-anamic1 VAMic1-LDO */
+static struct regulator_init_data ab8500_vamic1_init = {
+ .constraints = {
+ .name = "V-AMIC1",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
+static struct regulator_init_data ab8500_vamic2_init = {
+ .constraints = {
+ .name = "V-AMIC2",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supply for v-dmic, VDMIC LDO */
+static struct regulator_init_data ab8500_vdmic_init = {
+ .constraints = {
+ .name = "V-DMIC",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supply for v-intcore12, VINTCORE12 LDO */
+static struct regulator_init_data ab8500_vintcore_init = {
+ .constraints = {
+ .name = "V-INTCORE",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* supply for U8500 CSI/DSI, VANA LDO */
+static struct regulator_init_data ab8500_vana_init = {
+ .constraints = {
+ .name = "V-CSI/DSI",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
new file mode 100644
index 0000000..bac9956
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/mmci.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+
+#include <plat/pincfg.h>
+#include <mach/devices.h>
+#include <mach/hardware.h>
+
+#include "pins-db8500.h"
+#include "board-mop500.h"
+
+static pin_cfg_t mop500_sdi_pins[] = {
+ /* SDI4 (on-board eMMC) */
+ GPIO197_MC4_DAT3,
+ GPIO198_MC4_DAT2,
+ GPIO199_MC4_DAT1,
+ GPIO200_MC4_DAT0,
+ GPIO201_MC4_CMD,
+ GPIO202_MC4_FBCLK,
+ GPIO203_MC4_CLK,
+ GPIO204_MC4_DAT7,
+ GPIO205_MC4_DAT6,
+ GPIO206_MC4_DAT5,
+ GPIO207_MC4_DAT4,
+};
+
+static pin_cfg_t mop500_sdi2_pins[] = {
+ /* SDI2 (POP eMMC) */
+ GPIO128_MC2_CLK,
+ GPIO129_MC2_CMD,
+ GPIO130_MC2_FBCLK,
+ GPIO131_MC2_DAT0,
+ GPIO132_MC2_DAT1,
+ GPIO133_MC2_DAT2,
+ GPIO134_MC2_DAT3,
+ GPIO135_MC2_DAT4,
+ GPIO136_MC2_DAT5,
+ GPIO137_MC2_DAT6,
+ GPIO138_MC2_DAT7,
+};
+
+/*
+ * SDI 2 (POP eMMC, not on DB8500ed)
+ */
+
+static struct mmci_platform_data mop500_sdi2_data = {
+ .ocr_mask = MMC_VDD_165_195,
+ .f_max = 100000000,
+ .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
+ .gpio_cd = -1,
+ .gpio_wp = -1,
+};
+
+/*
+ * SDI 4 (on-board eMMC)
+ */
+
+static struct mmci_platform_data mop500_sdi4_data = {
+ .ocr_mask = MMC_VDD_29_30,
+ .f_max = 100000000,
+ .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA |
+ MMC_CAP_MMC_HIGHSPEED,
+ .gpio_cd = -1,
+ .gpio_wp = -1,
+};
+
+void mop500_sdi_init(void)
+{
+ nmk_config_pins(mop500_sdi_pins, ARRAY_SIZE(mop500_sdi_pins));
+
+ u8500_sdi2_device.dev.platform_data = &mop500_sdi2_data;
+ u8500_sdi4_device.dev.platform_data = &mop500_sdi4_data;
+
+ if (!cpu_is_u8500ed()) {
+ nmk_config_pins(mop500_sdi2_pins, ARRAY_SIZE(mop500_sdi2_pins));
+ amba_device_register(&u8500_sdi2_device, &iomem_resource);
+ }
+
+ /* On-board eMMC */
+ amba_device_register(&u8500_sdi4_device, &iomem_resource);
+}
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 0e8fd13..642b8e6 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -28,8 +28,10 @@
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
+#include <mach/irqs.h>
#include "pins-db8500.h"
+#include "board-mop500.h"
static pin_cfg_t mop500_pins[] = {
/* SSP0 */
@@ -75,9 +77,27 @@
.irq_base = MOP500_AB8500_IRQ_BASE,
};
-static struct spi_board_info u8500_spi_devices[] = {
+static struct resource ab8500_resources[] = {
+ [0] = {
+ .start = IRQ_AB8500,
+ .end = IRQ_AB8500,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+struct platform_device ab8500_device = {
+ .name = "ab8500-i2c",
+ .id = 0,
+ .dev = {
+ .platform_data = &ab8500_platdata,
+ },
+ .num_resources = 1,
+ .resource = ab8500_resources,
+};
+
+static struct spi_board_info ab8500_spi_devices[] = {
{
- .modalias = "ab8500",
+ .modalias = "ab8500-spi",
.controller_data = &ab4500_chip_info,
.platform_data = &ab8500_platdata,
.max_speed_hz = 12000000,
@@ -163,8 +183,14 @@
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
- spi_register_board_info(u8500_spi_devices,
- ARRAY_SIZE(u8500_spi_devices));
+ mop500_sdi_init();
+
+ /* If HW is early drop (ED) or V1.0 then use SPI to access AB8500 */
+ if (cpu_is_u8500ed() || cpu_is_u8500v10())
+ spi_register_board_info(ab8500_spi_devices,
+ ARRAY_SIZE(ab8500_spi_devices));
+ else /* If HW is v.1.1 or later use I2C to access AB8500 */
+ platform_device_register(&ab8500_device);
}
MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
new file mode 100644
index 0000000..2d24032
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __BOARD_MOP500_H
+#define __BOARD_MOP500_H
+
+extern void mop500_sdi_init(void);
+
+#endif
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index e9278f6..2f87075 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -14,6 +14,7 @@
#include <mach/hardware.h>
#include <mach/devices.h>
#include <mach/setup.h>
+#include <mach/irqs.h>
static struct map_desc u5500_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
@@ -24,6 +25,90 @@
__IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K),
};
+static struct resource mbox0_resources[] = {
+ {
+ .name = "mbox_peer",
+ .start = U5500_MBOX0_PEER_START,
+ .end = U5500_MBOX0_PEER_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mbox_local",
+ .start = U5500_MBOX0_LOCAL_START,
+ .end = U5500_MBOX0_LOCAL_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mbox_irq",
+ .start = MBOX_PAIR0_VIRT_IRQ,
+ .end = MBOX_PAIR0_VIRT_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct resource mbox1_resources[] = {
+ {
+ .name = "mbox_peer",
+ .start = U5500_MBOX1_PEER_START,
+ .end = U5500_MBOX1_PEER_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mbox_local",
+ .start = U5500_MBOX1_LOCAL_START,
+ .end = U5500_MBOX1_LOCAL_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mbox_irq",
+ .start = MBOX_PAIR1_VIRT_IRQ,
+ .end = MBOX_PAIR1_VIRT_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct resource mbox2_resources[] = {
+ {
+ .name = "mbox_peer",
+ .start = U5500_MBOX2_PEER_START,
+ .end = U5500_MBOX2_PEER_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mbox_local",
+ .start = U5500_MBOX2_LOCAL_START,
+ .end = U5500_MBOX2_LOCAL_END,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mbox_irq",
+ .start = MBOX_PAIR2_VIRT_IRQ,
+ .end = MBOX_PAIR2_VIRT_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device mbox0_device = {
+ .id = 0,
+ .name = "mbox",
+ .resource = mbox0_resources,
+ .num_resources = ARRAY_SIZE(mbox0_resources),
+};
+
+static struct platform_device mbox1_device = {
+ .id = 1,
+ .name = "mbox",
+ .resource = mbox1_resources,
+ .num_resources = ARRAY_SIZE(mbox1_resources),
+};
+
+static struct platform_device mbox2_device = {
+ .id = 2,
+ .name = "mbox",
+ .resource = mbox2_resources,
+ .num_resources = ARRAY_SIZE(mbox2_resources),
+};
+
static struct platform_device *u5500_platform_devs[] __initdata = {
&u5500_gpio_devs[0],
&u5500_gpio_devs[1],
@@ -33,6 +118,9 @@
&u5500_gpio_devs[5],
&u5500_gpio_devs[6],
&u5500_gpio_devs[7],
+ &mbox0_device,
+ &mbox1_device,
+ &mbox2_device,
};
void __init u5500_map_io(void)
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index f21c444..4acab75 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -38,10 +38,12 @@
/* minimum static i/o mapping required to boot U8500 platforms */
static struct map_desc u8500_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_PRCMU_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_PRCMU_TCDM_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GPIO0_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K),
+ __MEM_DEV_DESC(U8500_BOOT_ROM_BASE, SZ_1M),
};
static struct map_desc u8500ed_io_desc[] __initdata = {
@@ -53,6 +55,69 @@
__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
};
+/*
+ * Functions to differentiate between later ASICs
+ * We look into the end of the ROM to locate the hardcoded ASIC ID.
+ * This is only needed to differentiate between minor revisions and
+ * process variants of an ASIC; the major revisions are encoded in
+ * the cpuid.
+ */
+#define U8500_ASIC_ID_LOC_ED_V1 (U8500_BOOT_ROM_BASE + 0x1FFF4)
+#define U8500_ASIC_ID_LOC_V2 (U8500_BOOT_ROM_BASE + 0x1DBF4)
+#define U8500_ASIC_REV_ED 0x01
+#define U8500_ASIC_REV_V10 0xA0
+#define U8500_ASIC_REV_V11 0xA1
+#define U8500_ASIC_REV_V20 0xB0
+
+/**
+ * struct db8500_asic_id - fields of the ASIC ID
+ * @process: the manufacturing process, 0x40 is 40 nm
+ * 0x00 is "standard"
+ * @partnumber: hitherto 0x8500 for DB8500
+ * @revision: version code in the series
+ * This field is not formally defined but makes
+ * sense.
+ */
+struct db8500_asic_id {
+ u8 process;
+ u16 partnumber;
+ u8 revision;
+};
+
+/* This isn't going to change at runtime */
+static struct db8500_asic_id db8500_id;
+
+static void __init get_db8500_asic_id(void)
+{
+ u32 asicid;
+
+ if (cpu_is_u8500v1() || cpu_is_u8500ed())
+ asicid = readl(__io_address(U8500_ASIC_ID_LOC_ED_V1));
+ else if (cpu_is_u8500v2())
+ asicid = readl(__io_address(U8500_ASIC_ID_LOC_V2));
+ else
+ BUG();
+
+ db8500_id.process = (asicid >> 24);
+ db8500_id.partnumber = (asicid >> 16) & 0xFFFFU;
+ db8500_id.revision = asicid & 0xFFU;
+}
+
+bool cpu_is_u8500v10(void)
+{
+ return (db8500_id.revision == U8500_ASIC_REV_V10);
+}
+
+bool cpu_is_u8500v11(void)
+{
+ return (db8500_id.revision == U8500_ASIC_REV_V11);
+}
+
+bool cpu_is_u8500v20(void)
+{
+ return (db8500_id.revision == U8500_ASIC_REV_V20);
+}
+
void __init u8500_map_io(void)
{
ux500_map_io();
@@ -63,6 +128,9 @@
iotable_init(u8500ed_io_desc, ARRAY_SIZE(u8500ed_io_desc));
else
iotable_init(u8500v1_io_desc, ARRAY_SIZE(u8500v1_io_desc));
+
+ /* Read out the ASIC ID as early as we can */
+ get_db8500_asic_id();
}
/*
@@ -70,6 +138,20 @@
*/
void __init u8500_init_devices(void)
{
+ /* Display some ASIC boilerplate */
+ pr_info("DB8500: process: %02x, revision ID: 0x%02x\n",
+ db8500_id.process, db8500_id.revision);
+ if (cpu_is_u8500ed())
+ pr_info("DB8500: Early Drop (ED)\n");
+ else if (cpu_is_u8500v10())
+ pr_info("DB8500: version 1.0\n");
+ else if (cpu_is_u8500v11())
+ pr_info("DB8500: version 1.1\n");
+ else if (cpu_is_u8500v20())
+ pr_info("DB8500: version 2.0\n");
+ else
+ pr_warning("ASIC: UNKNOWN SILICON VERSION!\n");
+
ux500_init_devices();
if (cpu_is_u8500ed())
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 9280d25..40032fe 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -110,6 +110,82 @@
.num_resources = ARRAY_SIZE(u8500_i2c4_resources),
};
+/*
+ * SD/MMC
+ */
+
+struct amba_device u8500_sdi0_device = {
+ .dev = {
+ .init_name = "sdi0",
+ },
+ .res = {
+ .start = U8500_SDI0_BASE,
+ .end = U8500_SDI0_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_DB8500_SDMMC0, NO_IRQ},
+};
+
+struct amba_device u8500_sdi1_device = {
+ .dev = {
+ .init_name = "sdi1",
+ },
+ .res = {
+ .start = U8500_SDI1_BASE,
+ .end = U8500_SDI1_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_DB8500_SDMMC1, NO_IRQ},
+};
+
+struct amba_device u8500_sdi2_device = {
+ .dev = {
+ .init_name = "sdi2",
+ },
+ .res = {
+ .start = U8500_SDI2_BASE,
+ .end = U8500_SDI2_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_DB8500_SDMMC2, NO_IRQ},
+};
+
+struct amba_device u8500_sdi3_device = {
+ .dev = {
+ .init_name = "sdi3",
+ },
+ .res = {
+ .start = U8500_SDI3_BASE,
+ .end = U8500_SDI3_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_DB8500_SDMMC3, NO_IRQ},
+};
+
+struct amba_device u8500_sdi4_device = {
+ .dev = {
+ .init_name = "sdi4",
+ },
+ .res = {
+ .start = U8500_SDI4_BASE,
+ .end = U8500_SDI4_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_DB8500_SDMMC4, NO_IRQ},
+};
+
+struct amba_device u8500_sdi5_device = {
+ .dev = {
+ .init_name = "sdi5",
+ },
+ .res = {
+ .start = U8500_SDI5_BASE,
+ .end = U8500_SDI5_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_DB8500_SDMMC5, NO_IRQ},
+};
+
static struct resource dma40_resources[] = {
[0] = {
.start = U8500_DMA_BASE,
@@ -170,23 +246,23 @@
* Mapping between destination event lines and physical device address.
* The event line is tied to a device and therefore the address is constant.
*/
-static const dma_addr_t dma40_tx_map[STEDMA40_NR_DEV];
+static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV];
/* Mapping between source event lines and physical device address */
-static const dma_addr_t dma40_rx_map[STEDMA40_NR_DEV];
+static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV];
/* Reserved event lines for memcpy only */
static int dma40_memcpy_event[] = {
- STEDMA40_MEMCPY_TX_0,
- STEDMA40_MEMCPY_TX_1,
- STEDMA40_MEMCPY_TX_2,
- STEDMA40_MEMCPY_TX_3,
- STEDMA40_MEMCPY_TX_4,
- STEDMA40_MEMCPY_TX_5,
+ DB8500_DMA_MEMCPY_TX_0,
+ DB8500_DMA_MEMCPY_TX_1,
+ DB8500_DMA_MEMCPY_TX_2,
+ DB8500_DMA_MEMCPY_TX_3,
+ DB8500_DMA_MEMCPY_TX_4,
+ DB8500_DMA_MEMCPY_TX_5,
};
static struct stedma40_platform_data dma40_plat_data = {
- .dev_len = STEDMA40_NR_DEV,
+ .dev_len = DB8500_DMA_NR_DEV,
.dev_rx = dma40_rx_map,
.dev_tx = dma40_tx_map,
.memcpy = dma40_memcpy_event,
diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c
new file mode 100644
index 0000000..b782a03
--- /dev/null
+++ b/arch/arm/mach-ux500/hotplug.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Based on ARM realview platform
+ *
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/completion.h>
+
+#include <asm/cacheflush.h>
+
+extern volatile int pen_release;
+
+static DECLARE_COMPLETION(cpu_killed);
+
+static inline void platform_do_lowpower(unsigned int cpu)
+{
+ flush_cache_all();
+
+ /* just put the platform into WFI */
+ for (;;) {
+ __asm__ __volatile__("dsb\n\t" "wfi\n\t"
+ : : : "memory");
+ if (pen_release == cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+ }
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+ return wait_for_completion_timeout(&cpu_killed, 5000);
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+#ifdef DEBUG
+ unsigned int this_cpu = hard_smp_processor_id();
+
+ if (cpu != this_cpu) {
+ printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
+ this_cpu, cpu);
+ BUG();
+ }
+#endif
+
+ printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+ complete(&cpu_killed);
+
+ /* directly enter low power state, skipping secure registers */
+ platform_do_lowpower(cpu);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+ /*
+ * we don't allow CPU 0 to be shut down (it is still too special
+ * e.g. clock tick interrupts)
+ */
+ return cpu == 0 ? -EPERM : 0;
+}
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
index 545c80f..3eafc0e 100644
--- a/arch/arm/mach-ux500/include/mach/db5500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -100,4 +100,18 @@
#define U5500_GPIOBANK6_BASE (U5500_GPIO4_BASE + 0x80)
#define U5500_GPIOBANK7_BASE (U5500_GPIO4_BASE + 0x100)
+#define U5500_MBOX_BASE (U5500_MODEM_BASE + 0xFFD1000)
+#define U5500_MBOX0_PEER_START (U5500_MBOX_BASE + 0x40)
+#define U5500_MBOX0_PEER_END (U5500_MBOX_BASE + 0x5F)
+#define U5500_MBOX0_LOCAL_START (U5500_MBOX_BASE + 0x60)
+#define U5500_MBOX0_LOCAL_END (U5500_MBOX_BASE + 0x7F)
+#define U5500_MBOX1_PEER_START (U5500_MBOX_BASE + 0x80)
+#define U5500_MBOX1_PEER_END (U5500_MBOX_BASE + 0x9F)
+#define U5500_MBOX1_LOCAL_START (U5500_MBOX_BASE + 0xA0)
+#define U5500_MBOX1_LOCAL_END (U5500_MBOX_BASE + 0xBF)
+#define U5500_MBOX2_PEER_START (U5500_MBOX_BASE + 0x00)
+#define U5500_MBOX2_PEER_END (U5500_MBOX_BASE + 0x1F)
+#define U5500_MBOX2_LOCAL_START (U5500_MBOX_BASE + 0x20)
+#define U5500_MBOX2_LOCAL_END (U5500_MBOX_BASE + 0x3F)
+
#endif
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
index f000218..f07d098 100644
--- a/arch/arm/mach-ux500/include/mach/db8500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -30,8 +30,6 @@
#define U8500_ICN_BASE 0x81000000
#define U8500_BOOT_ROM_BASE 0x90000000
-/* ASIC ID is at 0xff4 offset within this region */
-#define U8500_ASIC_ID_BASE 0x9001F000
#define U8500_PER6_BASE 0xa03c0000
#define U8500_PER5_BASE 0xa03e0000
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index c2b2f25..33a120c 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -27,6 +27,13 @@
extern struct platform_device u8500_i2c4_device;
extern struct platform_device u8500_dma40_device;
+extern struct amba_device u8500_sdi0_device;
+extern struct amba_device u8500_sdi1_device;
+extern struct amba_device u8500_sdi2_device;
+extern struct amba_device u8500_sdi3_device;
+extern struct amba_device u8500_sdi4_device;
+extern struct amba_device u8500_sdi5_device;
+
void dma40_u8500ed_fixup(void);
#endif
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 8656379..32e883a 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -104,16 +104,35 @@
#endif
}
+#define CPUID_DB8500ED 0x410fc090
+#define CPUID_DB8500V1 0x411fc091
+#define CPUID_DB8500V2 0x412fc091
+
static inline bool cpu_is_u8500ed(void)
{
- return cpu_is_u8500() && (read_cpuid_id() & 15) == 0;
+ return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500ED);
}
static inline bool cpu_is_u8500v1(void)
{
- return cpu_is_u8500() && (read_cpuid_id() & 15) == 1;
+ return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500V1);
}
+static inline bool cpu_is_u8500v2(void)
+{
+ return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500V2);
+}
+
+#ifdef CONFIG_UX500_SOC_DB8500
+bool cpu_is_u8500v10(void);
+bool cpu_is_u8500v11(void);
+bool cpu_is_u8500v20(void);
+#else
+static inline bool cpu_is_u8500v10(void) { return false; }
+static inline bool cpu_is_u8500v11(void) { return false; }
+static inline bool cpu_is_u8500v20(void) { return false; }
+#endif
+
static inline bool cpu_is_u5500(void)
{
#ifdef CONFIG_UX500_SOC_DB5500
diff --git a/arch/arm/mach-ux500/include/mach/irqs-db5500.h b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
index 6fbfe5e..bfa123d 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
@@ -61,6 +61,7 @@
#define IRQ_DB5500_SDMMC0 (IRQ_SHPI_START + 60)
#define IRQ_DB5500_HSEM (IRQ_SHPI_START + 61)
#define IRQ_DB5500_SBAG (IRQ_SHPI_START + 63)
+#define IRQ_DB5500_MODEM (IRQ_SHPI_START + 65)
#define IRQ_DB5500_SPI1 (IRQ_SHPI_START + 96)
#define IRQ_DB5500_MSP2 (IRQ_SHPI_START + 98)
#define IRQ_DB5500_SRPTIMER (IRQ_SHPI_START + 101)
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index 10385bd..693aa57 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -40,7 +40,8 @@
#define IRQ_HSIR_CH1_OVRRUN (IRQ_SHPI_START + 33)
#define IRQ_HSIR_CH2_OVRRUN (IRQ_SHPI_START + 34)
#define IRQ_HSIR_CH3_OVRRUN (IRQ_SHPI_START + 35)
-#define IRQ_AB4500 (IRQ_SHPI_START + 40)
+#define IRQ_AB8500 (IRQ_SHPI_START + 40)
+#define IRQ_PRCMU (IRQ_SHPI_START + 47)
#define IRQ_DISP (IRQ_SHPI_START + 48)
#define IRQ_SiPI3 (IRQ_SHPI_START + 49)
#define IRQ_I2C4 (IRQ_SHPI_START + 51)
@@ -83,6 +84,19 @@
#include <mach/irqs-board-mop500.h>
#endif
-#define NR_IRQS IRQ_BOARD_END
+/*
+ * After the board-specific IRQs we reserve a range of IRQs in which virtual
+ * IRQs representing modem IRQs can be allocated
+ */
+#define IRQ_MODEM_EVENTS_BASE (IRQ_BOARD_END + 1)
+#define IRQ_MODEM_EVENTS_NBR 72
+#define IRQ_MODEM_EVENTS_END (IRQ_MODEM_EVENTS_BASE + IRQ_MODEM_EVENTS_NBR)
+
+/* List of virtual IRQs that are allocated from the range above */
+#define MBOX_PAIR0_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 43)
+#define MBOX_PAIR1_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 45)
+#define MBOX_PAIR2_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 41)
+
+#define NR_IRQS IRQ_MODEM_EVENTS_END
#endif /* ASM_ARCH_IRQS_H */
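For illustration, a modem event number n is expected to surface as virtual IRQ
IRQ_MODEM_EVENTS_BASE + n; the handler added in modem_irq.c further down follows
this scheme. A minimal sketch (the helper name is hypothetical, not part of the patch):

static inline int modem_event_to_virt_irq(int event)
{
	/* e.g. event 43 lands on MBOX_PAIR0_VIRT_IRQ */
	return IRQ_MODEM_EVENTS_BASE + event;
}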
diff --git a/arch/arm/mach-ux500/include/mach/mbox.h b/arch/arm/mach-ux500/include/mach/mbox.h
new file mode 100644
index 0000000..7f9da4d
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/mbox.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson.
+ * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __INC_STE_MBOX_H
+#define __INC_STE_MBOX_H
+
+#define MBOX_BUF_SIZE 16
+#define MBOX_NAME_SIZE 8
+
+/**
+ * mbox_recv_cb_t - Definition of the mailbox callback.
+ * @mbox_msg: The mailbox message.
+ * @priv: The client's private data as specified in the call to mbox_setup.
+ *
+ * This function will be called upon reception of new mailbox messages.
+ */
+typedef void mbox_recv_cb_t (u32 mbox_msg, void *priv);
+
+/**
+ * struct mbox - Mailbox instance struct
+ * @list: Linked list head.
+ * @pdev: Pointer to device struct.
+ * @cb: Callback function. Will be called
+ * when new data is received.
+ * @client_data: Client's private data. Will be sent back
+ * in the callback function.
+ * @virtbase_peer: Virtual address for outgoing mailbox.
+ * @virtbase_local: Virtual address for incoming mailbox.
+ * @buffer: The internal queue for outgoing messages.
+ * @name: Name of this mailbox.
+ * @buffer_available: Completion variable to achieve "blocking send".
+ * This variable will be signaled when there is
+ * internal buffer space available.
+ * @client_blocked: To keep track if any client is currently
+ * blocked.
+ * @lock: Spinlock to protect this mailbox instance.
+ * @write_index: Index in internal buffer to write to.
+ * @read_index: Index in internal buffer to read from.
+ * @allocated: Indicates whether this particular mailbox
+ * id has been allocated by someone.
+ */
+struct mbox {
+ struct list_head list;
+ struct platform_device *pdev;
+ mbox_recv_cb_t *cb;
+ void *client_data;
+ void __iomem *virtbase_peer;
+ void __iomem *virtbase_local;
+ u32 buffer[MBOX_BUF_SIZE];
+ char name[MBOX_NAME_SIZE];
+ struct completion buffer_available;
+ u8 client_blocked;
+ spinlock_t lock;
+ u8 write_index;
+ u8 read_index;
+ bool allocated;
+};
+
+/**
+ * mbox_setup - Set up a mailbox and return its instance.
+ * @mbox_id: The ID number of the mailbox. 0 or 1 for modem CPU,
+ * 2 for modem DSP.
+ * @mbox_cb: Pointer to the callback function to be called when a new message
+ * is received.
+ * @priv: Client user data which will be returned in the callback.
+ *
+ * Returns a mailbox instance to be specified in subsequent calls to mbox_send,
+ * or NULL on error.
+ */
+struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv);
+
+/**
+ * mbox_send - Send a mailbox message.
+ * @mbox: Mailbox instance (returned by mbox_setup)
+ * @mbox_msg: The mailbox message to send.
+ * @block: Specifies whether this call will block until send is possible,
+ * or return an error if the mailbox buffer is full.
+ *
+ * Returns 0 on success or a negative error code on error. -ENOMEM indicates
+ * that the internal buffer is full and you have to try again later (or
+ * specify "block" in order to block until send is possible).
+ */
+int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block);
+
+#endif /* __INC_STE_MBOX_H */
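The kernel-doc above covers the complete client-facing API. A minimal usage sketch,
assuming a hypothetical client (the callback name, mailbox id 0 and the message value
are illustrative only, not part of the patch):

static void my_mbox_cb(u32 mbox_msg, void *priv)
{
	pr_info("mbox message received: 0x%08x\n", mbox_msg);
}

static int my_mbox_client_init(void)
{
	struct mbox *mbox;

	/* Mailbox 0 or 1 talks to the modem CPU, 2 to the modem DSP */
	mbox = mbox_setup(0, my_mbox_cb, NULL);
	if (mbox == NULL)
		return -ENODEV;

	/* Blocking send: waits for buffer space instead of returning -ENOMEM */
	return mbox_send(mbox, 0xdeadbeef, true);
}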
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-regs.h b/arch/arm/mach-ux500/include/mach/prcmu-regs.h
new file mode 100644
index 0000000..8885f39
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/prcmu-regs.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2009 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+#ifndef __MACH_PRCMU_REGS_H
+#define __MACH_PRCMU_REGS_H
+
+#include <mach/hardware.h>
+
+#define _PRCMU_BASE IO_ADDRESS(U8500_PRCMU_BASE)
+
+#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118)
+#define PRCM_ARM_CHGCLKREQ (_PRCMU_BASE + 0x114)
+#define PRCM_PLLARM_ENABLE (_PRCMU_BASE + 0x98)
+#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0)
+#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4)
+#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0)
+#define PRCM_ARM_LS_CLAMP (_PRCMU_BASE + 0x30c)
+#define PRCM_SRAM_A9 (_PRCMU_BASE + 0x308)
+
+/* ARM WFI Standby signal register */
+#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130)
+#define PRCMU_IOCR (_PRCMU_BASE + 0x310)
+
+/* CPU mailbox registers */
+#define PRCM_MBOX_CPU_VAL (_PRCMU_BASE + 0x0fc)
+#define PRCM_MBOX_CPU_SET (_PRCMU_BASE + 0x100)
+#define PRCM_MBOX_CPU_CLR (_PRCMU_BASE + 0x104)
+
+/* Dual A9 core interrupt management unit registers */
+#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328)
+#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c)
+#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c)
+#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120)
+#define PRCM_ARMITMSK95TO64 (_PRCMU_BASE + 0x124)
+#define PRCM_ARMITMSK127TO96 (_PRCMU_BASE + 0x128)
+#define PRCM_POWER_STATE_VAL (_PRCMU_BASE + 0x25C)
+#define PRCM_ARMITVAL31TO0 (_PRCMU_BASE + 0x260)
+#define PRCM_ARMITVAL63TO32 (_PRCMU_BASE + 0x264)
+#define PRCM_ARMITVAL95TO64 (_PRCMU_BASE + 0x268)
+#define PRCM_ARMITVAL127TO96 (_PRCMU_BASE + 0x26C)
+
+#define PRCM_HOSTACCESS_REQ (_PRCMU_BASE + 0x334)
+#define ARM_WAKEUP_MODEM 0x1
+
+#define PRCM_ARM_IT1_CLEAR (_PRCMU_BASE + 0x48C)
+#define PRCM_ARM_IT1_VAL (_PRCMU_BASE + 0x494)
+#define PRCM_HOLD_EVT (_PRCMU_BASE + 0x174)
+
+#define PRCM_ITSTATUS0 (_PRCMU_BASE + 0x148)
+#define PRCM_ITSTATUS1 (_PRCMU_BASE + 0x150)
+#define PRCM_ITSTATUS2 (_PRCMU_BASE + 0x158)
+#define PRCM_ITSTATUS3 (_PRCMU_BASE + 0x160)
+#define PRCM_ITSTATUS4 (_PRCMU_BASE + 0x168)
+#define PRCM_ITSTATUS5 (_PRCMU_BASE + 0x484)
+#define PRCM_ITCLEAR5 (_PRCMU_BASE + 0x488)
+#define PRCM_ARMIT_MASKXP70_IT (_PRCMU_BASE + 0x1018)
+
+/* System reset register */
+#define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228)
+
+/* Level shifter and clamp control registers */
+#define PRCM_MMIP_LS_CLAMP_SET (_PRCMU_BASE + 0x420)
+#define PRCM_MMIP_LS_CLAMP_CLR (_PRCMU_BASE + 0x424)
+
+/* PRCMU clock/PLL/reset registers */
+#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500)
+#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504)
+#define PRCM_LCDCLK_MGT (_PRCMU_BASE + 0x044)
+#define PRCM_MCDECLK_MGT (_PRCMU_BASE + 0x064)
+#define PRCM_HDMICLK_MGT (_PRCMU_BASE + 0x058)
+#define PRCM_TVCLK_MGT (_PRCMU_BASE + 0x07c)
+#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530)
+#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C)
+#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4)
+#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8)
+
+/* ePOD and memory power signal control registers */
+#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410)
+#define PRCM_SRAM_LS_SLEEP (_PRCMU_BASE + 0x304)
+
+/* Debug power control unit registers */
+#define PRCM_POWER_STATE_SET (_PRCMU_BASE + 0x254)
+
+/* Miscellaneous unit registers */
+#define PRCM_DSI_SW_RESET (_PRCMU_BASE + 0x324)
+
+#endif /* __MACH_PRCMU_REGS_H */
diff --git a/arch/arm/mach-ux500/include/mach/prcmu.h b/arch/arm/mach-ux500/include/mach/prcmu.h
new file mode 100644
index 0000000..549843f
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/prcmu.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * PRCMU f/w APIs
+ */
+#ifndef __MACH_PRCMU_H
+#define __MACH_PRCMU_H
+
+int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
+int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
+
+#endif /* __MACH_PRCMU_H */
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index e978dbd..54bbe64 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -38,4 +38,11 @@
.type = MT_DEVICE, \
}
+#define __MEM_DEV_DESC(x, sz) { \
+ .virtual = IO_ADDRESS(x), \
+ .pfn = __phys_to_pfn(x), \
+ .length = sz, \
+ .type = MT_MEMORY, \
+}
+
#endif /* __ASM_ARCH_SETUP_H */
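__MEM_DEV_DESC fills a struct map_desc entry like the I/O variant above, but with
MT_MEMORY so the region is mapped as normal memory. A hedged sketch of the usual ARM
static-mapping idiom (the table name is hypothetical; U8500_BOOT_ROM_BASE is used
purely as an example of a region one might map this way):

static struct map_desc u8500_example_io_desc[] __initdata = {
	__MEM_DEV_DESC(U8500_BOOT_ROM_BASE, SZ_4K),
};

/* Registered from the machine's .map_io hook: */
/* iotable_init(u8500_example_io_desc, ARRAY_SIZE(u8500_example_io_desc)); */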
diff --git a/arch/arm/mach-ux500/include/mach/smp.h b/arch/arm/mach-ux500/include/mach/smp.h
index b59f7bc..197e841 100644
--- a/arch/arm/mach-ux500/include/mach/smp.h
+++ b/arch/arm/mach-ux500/include/mach/smp.h
@@ -10,18 +10,11 @@
#define ASMARM_ARCH_SMP_H
#include <asm/hardware/gic.h>
+#include <asm/smp_mpidr.h>
/* This is required to wakeup the secondary core */
extern void u8500_secondary_startup(void);
-#define hard_smp_processor_id() \
- ({ \
- unsigned int cpunum; \
- __asm__("mrc p15, 0, %0, c0, c0, 5" \
- : "=r" (cpunum)); \
- cpunum &= 0x0F; \
- })
-
/*
* We use IRQ1 as the IPI
*/
diff --git a/arch/arm/mach-ux500/mbox.c b/arch/arm/mach-ux500/mbox.c
new file mode 100644
index 0000000..6343538
--- /dev/null
+++ b/arch/arm/mach-ux500/mbox.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson.
+ * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * Mailbox nomenclature:
+ *
+ * APE MODEM
+ * mbox pairX
+ * ..........................
+ * . .
+ * . peer .
+ * . send ---- .
+ * . --> | | .
+ * . | | .
+ * . ---- .
+ * . .
+ * . local .
+ * . rec ---- .
+ * . | | <-- .
+ * . | | .
+ * . ---- .
+ * .........................
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/completion.h>
+#include <mach/mbox.h>
+
+#define MBOX_NAME "mbox"
+
+#define MBOX_FIFO_DATA 0x000
+#define MBOX_FIFO_ADD 0x004
+#define MBOX_FIFO_REMOVE 0x008
+#define MBOX_FIFO_THRES_FREE 0x00C
+#define MBOX_FIFO_THRES_OCCUP 0x010
+#define MBOX_FIFO_STATUS 0x014
+
+#define MBOX_DISABLE_IRQ 0x4
+#define MBOX_ENABLE_IRQ 0x0
+#define MBOX_LATCH 1
+
+/* Global list of all mailboxes */
+static struct list_head mboxs = LIST_HEAD_INIT(mboxs);
+
+static struct mbox *get_mbox_with_id(u8 id)
+{
+ u8 i;
+ struct list_head *pos = &mboxs;
+ for (i = 0; i <= id; i++)
+ pos = pos->next;
+
+ return (struct mbox *) list_entry(pos, struct mbox, list);
+}
+
+int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block)
+{
+ int res = 0;
+
+ spin_lock(&mbox->lock);
+
+ dev_dbg(&(mbox->pdev->dev),
+ "About to buffer 0x%X to mailbox 0x%X."
+ " ri = %d, wi = %d\n",
+ mbox_msg, (u32)mbox, mbox->read_index,
+ mbox->write_index);
+
+ /* Check if write buffer is full */
+ while (((mbox->write_index + 1) % MBOX_BUF_SIZE) == mbox->read_index) {
+ if (!block) {
+ dev_dbg(&(mbox->pdev->dev),
+ "Buffer full in non-blocking call! "
+ "Returning -ENOMEM!\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+ spin_unlock(&mbox->lock);
+ dev_dbg(&(mbox->pdev->dev),
+ "Buffer full in blocking call! Sleeping...\n");
+ mbox->client_blocked = 1;
+ wait_for_completion(&mbox->buffer_available);
+ dev_dbg(&(mbox->pdev->dev),
+ "Blocking send was woken up! Trying again...\n");
+ spin_lock(&mbox->lock);
+ }
+
+ mbox->buffer[mbox->write_index] = mbox_msg;
+ mbox->write_index = (mbox->write_index + 1) % MBOX_BUF_SIZE;
+
+ /*
+ * Indicate that we want an IRQ as soon as there is a slot
+ * in the FIFO
+ */
+ writel(MBOX_ENABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
+
+exit:
+ spin_unlock(&mbox->lock);
+ return res;
+}
+EXPORT_SYMBOL(mbox_send);
+
+#if defined(CONFIG_DEBUG_FS)
+/*
+ * Expected input: <value> <nbr sends>
+ * Example: "echo 0xdeadbeef 4 > mbox-node" sends 0xdeadbeef 4 times
+ */
+static ssize_t mbox_write_fifo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long mbox_mess;
+ unsigned long nbr_sends;
+ unsigned long i;
+ char int_buf[16];
+ char *token;
+ char *val;
+
+ struct mbox *mbox = (struct mbox *) dev->platform_data;
+
+ strncpy(int_buf, buf, sizeof(int_buf));
+ /* Guarantee NUL termination before handing the buffer to strsep() */
+ int_buf[sizeof(int_buf) - 1] = '\0';
+ token = int_buf;
+
+ /* Parse message */
+ val = strsep(&token, " ");
+ if ((val == NULL) || (strict_strtoul(val, 16, &mbox_mess) != 0))
+ mbox_mess = 0xDEADBEEF;
+
+ val = strsep(&token, " ");
+ if ((val == NULL) || (strict_strtoul(val, 10, &nbr_sends) != 0))
+ nbr_sends = 1;
+
+ dev_dbg(dev, "Will write 0x%lX %ld times using data struct at 0x%X\n",
+ mbox_mess, nbr_sends, (u32) mbox);
+
+ for (i = 0; i < nbr_sends; i++)
+ mbox_send(mbox, mbox_mess, true);
+
+ return count;
+}
+
+static ssize_t mbox_read_fifo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int mbox_value;
+ struct mbox *mbox = (struct mbox *) dev->platform_data;
+
+ if ((readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7) == 0)
+ return sprintf(buf, "Mailbox is empty\n");
+
+ mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
+ writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
+
+ return sprintf(buf, "0x%X\n", mbox_value);
+}
+
+static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
+
+static int mbox_show(struct seq_file *s, void *data)
+{
+ struct list_head *pos;
+ u8 mbox_index = 0;
+
+ list_for_each(pos, &mboxs) {
+ struct mbox *m =
+ (struct mbox *) list_entry(pos, struct mbox, list);
+ if (m == NULL) {
+ seq_printf(s,
+ "Unable to retrieve mailbox %d\n",
+ mbox_index);
+ continue;
+ }
+
+ spin_lock(&m->lock);
+ if ((m->virtbase_peer == NULL) || (m->virtbase_local == NULL)) {
+ seq_printf(s, "MAILBOX %d not setup or corrupt\n",
+ mbox_index);
+ spin_unlock(&m->lock);
+ continue;
+ }
+
+ seq_printf(s,
+ "===========================\n"
+ " MAILBOX %d\n"
+ " PEER MAILBOX DUMP\n"
+ "---------------------------\n"
+ "FIFO: 0x%X (%d)\n"
+ "Free Threshold: 0x%.2X (%d)\n"
+ "Occupied Threshold: 0x%.2X (%d)\n"
+ "Status: 0x%.2X (%d)\n"
+ " Free spaces (ot): %d (%d)\n"
+ " Occup spaces (ot): %d (%d)\n"
+ "===========================\n"
+ " LOCAL MAILBOX DUMP\n"
+ "---------------------------\n"
+ "FIFO: 0x%.X (%d)\n"
+ "Free Threshold: 0x%.2X (%d)\n"
+ "Occupied Threshold: 0x%.2X (%d)\n"
+ "Status: 0x%.2X (%d)\n"
+ " Free spaces (ot): %d (%d)\n"
+ " Occup spaces (ot): %d (%d)\n"
+ "===========================\n"
+ "write_index: %d\n"
+ "read_index : %d\n"
+ "===========================\n"
+ "\n",
+ mbox_index,
+ readl(m->virtbase_peer + MBOX_FIFO_DATA),
+ readl(m->virtbase_peer + MBOX_FIFO_DATA),
+ readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE),
+ readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE),
+ readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP),
+ readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP),
+ readl(m->virtbase_peer + MBOX_FIFO_STATUS),
+ readl(m->virtbase_peer + MBOX_FIFO_STATUS),
+ (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 4) & 0x7,
+ (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 7) & 0x1,
+ (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 0) & 0x7,
+ (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 3) & 0x1,
+ readl(m->virtbase_local + MBOX_FIFO_DATA),
+ readl(m->virtbase_local + MBOX_FIFO_DATA),
+ readl(m->virtbase_local + MBOX_FIFO_THRES_FREE),
+ readl(m->virtbase_local + MBOX_FIFO_THRES_FREE),
+ readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP),
+ readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP),
+ readl(m->virtbase_local + MBOX_FIFO_STATUS),
+ readl(m->virtbase_local + MBOX_FIFO_STATUS),
+ (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 4) & 0x7,
+ (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 7) & 0x1,
+ (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 0) & 0x7,
+ (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 3) & 0x1,
+ m->write_index, m->read_index);
+ mbox_index++;
+ spin_unlock(&m->lock);
+ }
+
+ return 0;
+}
+
+static int mbox_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mbox_show, NULL);
+}
+
+static const struct file_operations mbox_operations = {
+ .owner = THIS_MODULE,
+ .open = mbox_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static irqreturn_t mbox_irq(int irq, void *arg)
+{
+ u32 mbox_value;
+ int nbr_occup;
+ int nbr_free;
+ struct mbox *mbox = (struct mbox *) arg;
+
+ spin_lock(&mbox->lock);
+
+ dev_dbg(&(mbox->pdev->dev),
+ "mbox IRQ [%d] received. ri = %d, wi = %d\n",
+ irq, mbox->read_index, mbox->write_index);
+
+ /*
+ * Check if we have any outgoing messages, and if there is space for
+ * them in the FIFO.
+ */
+ if (mbox->read_index != mbox->write_index) {
+ /*
+ * Check by reading FREE for LOCAL since that indicates
+ * OCCUP for PEER
+ */
+ nbr_free = (readl(mbox->virtbase_local + MBOX_FIFO_STATUS)
+ >> 4) & 0x7;
+ dev_dbg(&(mbox->pdev->dev),
+ "Status indicates %d empty spaces in the FIFO!\n",
+ nbr_free);
+
+ while ((nbr_free > 0) &&
+ (mbox->read_index != mbox->write_index)) {
+ /* Write the message and latch it into the FIFO */
+ writel(mbox->buffer[mbox->read_index],
+ (mbox->virtbase_peer + MBOX_FIFO_DATA));
+ writel(MBOX_LATCH,
+ (mbox->virtbase_peer + MBOX_FIFO_ADD));
+ dev_dbg(&(mbox->pdev->dev),
+ "Wrote message 0x%X to addr 0x%X\n",
+ mbox->buffer[mbox->read_index],
+ (u32) (mbox->virtbase_peer + MBOX_FIFO_DATA));
+
+ nbr_free--;
+ mbox->read_index =
+ (mbox->read_index + 1) % MBOX_BUF_SIZE;
+ }
+
+ /*
+ * Check if we still want IRQs when there is free
+ * space to send
+ */
+ if (mbox->read_index != mbox->write_index) {
+ dev_dbg(&(mbox->pdev->dev),
+ "Still have messages to send, but FIFO full. "
+ "Request IRQ again!\n");
+ writel(MBOX_ENABLE_IRQ,
+ mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
+ } else {
+ dev_dbg(&(mbox->pdev->dev),
+ "No more messages to send. "
+ "Do not request IRQ again!\n");
+ writel(MBOX_DISABLE_IRQ,
+ mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
+ }
+
+ /*
+ * Check if we can signal any blocked clients that it is OK to
+ * start buffering again
+ */
+ if (mbox->client_blocked &&
+ (((mbox->write_index + 1) % MBOX_BUF_SIZE)
+ != mbox->read_index)) {
+ dev_dbg(&(mbox->pdev->dev),
+ "Waking up blocked client\n");
+ complete(&mbox->buffer_available);
+ mbox->client_blocked = 0;
+ }
+ }
+
+ /* Check if we have any incoming messages */
+ nbr_occup = readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7;
+ if (nbr_occup == 0)
+ goto exit;
+
+ if (mbox->cb == NULL) {
+ dev_dbg(&(mbox->pdev->dev), "No receive callback registered, "
+ "leaving %d incoming messages in fifo!\n", nbr_occup);
+ goto exit;
+ }
+
+ /* Read and acknowledge the message */
+ mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
+ writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
+
+ /* Notify consumer of new mailbox message */
+ dev_dbg(&(mbox->pdev->dev), "Calling callback for message 0x%X!\n",
+ mbox_value);
+ mbox->cb(mbox_value, mbox->client_data);
+
+exit:
+ dev_dbg(&(mbox->pdev->dev), "Exit mbox IRQ. ri = %d, wi = %d\n",
+ mbox->read_index, mbox->write_index);
+ spin_unlock(&mbox->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* Setup is executed once for each mbox pair */
+struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
+{
+ struct resource *resource;
+ int irq;
+ int res;
+ struct mbox *mbox;
+
+ mbox = get_mbox_with_id(mbox_id);
+ if (mbox == NULL) {
+ dev_err(&(mbox->pdev->dev), "Incorrect mailbox id: %d!\n",
+ mbox_id);
+ goto exit;
+ }
+
+ /*
+ * Check if mailbox has been allocated to someone else,
+ * otherwise allocate it
+ */
+ if (mbox->allocated) {
+ dev_err(&(mbox->pdev->dev), "Mailbox number %d is busy!\n",
+ mbox_id);
+ mbox = NULL;
+ goto exit;
+ }
+ mbox->allocated = true;
+
+ dev_dbg(&(mbox->pdev->dev), "Initiating mailbox number %d: 0x%X...\n",
+ mbox_id, (u32)mbox);
+
+ mbox->client_data = priv;
+ mbox->cb = mbox_cb;
+
+ /* Get addr for peer mailbox and ioremap it */
+ resource = platform_get_resource_byname(mbox->pdev,
+ IORESOURCE_MEM,
+ "mbox_peer");
+ if (resource == NULL) {
+ dev_err(&(mbox->pdev->dev),
+ "Unable to retrieve mbox peer resource\n");
+ mbox = NULL;
+ goto exit;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "Resource name: %s start: 0x%X, end: 0x%X\n",
+ resource->name, resource->start, resource->end);
+ mbox->virtbase_peer =
+ ioremap(resource->start, resource->end - resource->start + 1);
+ if (!mbox->virtbase_peer) {
+ dev_err(&(mbox->pdev->dev), "Unable to ioremap peer mbox\n");
+ mbox = NULL;
+ goto exit;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "ioremapped peer physical: (0x%X-0x%X) to virtual: 0x%X\n",
+ resource->start, resource->end, (u32) mbox->virtbase_peer);
+
+ /* Get addr for local mailbox and ioremap it */
+ resource = platform_get_resource_byname(mbox->pdev,
+ IORESOURCE_MEM,
+ "mbox_local");
+ if (resource == NULL) {
+ dev_err(&(mbox->pdev->dev),
+ "Unable to retrieve mbox local resource\n");
+ mbox = NULL;
+ goto exit;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "Resource name: %s start: 0x%X, end: 0x%X\n",
+ resource->name, resource->start, resource->end);
+ mbox->virtbase_local =
+ ioremap(resource->start, resource->end - resource->start + 1);
+ if (!mbox->virtbase_local) {
+ dev_err(&(mbox->pdev->dev), "Unable to ioremap local mbox\n");
+ mbox = NULL;
+ goto exit;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "ioremapped local physical: (0x%X-0x%X) to virtual: 0x%X\n",
+ resource->start, resource->end, (u32) mbox->virtbase_local);
+
+ init_completion(&mbox->buffer_available);
+ mbox->client_blocked = 0;
+
+ /* Get IRQ for mailbox and allocate it */
+ irq = platform_get_irq_byname(mbox->pdev, "mbox_irq");
+ if (irq < 0) {
+ dev_err(&(mbox->pdev->dev),
+ "Unable to retrieve mbox irq resource\n");
+ mbox = NULL;
+ goto exit;
+ }
+
+ dev_dbg(&(mbox->pdev->dev), "Allocating irq %d...\n", irq);
+ res = request_irq(irq, mbox_irq, 0, mbox->name, (void *) mbox);
+ if (res < 0) {
+ dev_err(&(mbox->pdev->dev),
+ "Unable to allocate mbox irq %d\n", irq);
+ mbox = NULL;
+ goto exit;
+ }
+
+ /* Set up mailbox to not launch IRQ on free space in mailbox */
+ writel(MBOX_DISABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
+
+ /*
+ * Set up mailbox to launch IRQ on new message if we have
+ * a callback set. If not, do not raise IRQ, but keep message
+ * in FIFO for manual retrieval
+ */
+ if (mbox_cb != NULL)
+ writel(MBOX_ENABLE_IRQ,
+ mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP);
+ else
+ writel(MBOX_DISABLE_IRQ,
+ mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP);
+
+#if defined(CONFIG_DEBUG_FS)
+ res = device_create_file(&(mbox->pdev->dev), &dev_attr_fifo);
+ if (res != 0)
+ dev_warn(&(mbox->pdev->dev),
+ "Unable to create mbox sysfs entry");
+
+ (void) debugfs_create_file("mbox", S_IFREG | S_IRUGO, NULL,
+ NULL, &mbox_operations);
+#endif
+
+ dev_info(&(mbox->pdev->dev),
+ "Mailbox driver with index %d initated!\n", mbox_id);
+
+exit:
+ return mbox;
+}
+EXPORT_SYMBOL(mbox_setup);
+
+int __init mbox_probe(struct platform_device *pdev)
+{
+ struct mbox local_mbox;
+ struct mbox *mbox;
+ int res = 0;
+ dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32) pdev);
+
+ memset(&local_mbox, 0x0, sizeof(struct mbox));
+
+ /* Associate our mbox data with the platform device */
+ res = platform_device_add_data(pdev,
+ (void *) &local_mbox,
+ sizeof(struct mbox));
+ if (res != 0) {
+ dev_err(&(pdev->dev),
+ "Unable to allocate driver platform data!\n");
+ goto exit;
+ }
+
+ mbox = (struct mbox *) pdev->dev.platform_data;
+ mbox->pdev = pdev;
+ mbox->write_index = 0;
+ mbox->read_index = 0;
+
+ INIT_LIST_HEAD(&(mbox->list));
+ list_add_tail(&(mbox->list), &mboxs);
+
+ sprintf(mbox->name, "%s", MBOX_NAME);
+ spin_lock_init(&mbox->lock);
+
+ dev_info(&(pdev->dev), "Mailbox driver loaded\n");
+
+exit:
+ return res;
+}
+
+static struct platform_driver mbox_driver = {
+ .driver = {
+ .name = MBOX_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mbox_init(void)
+{
+ return platform_driver_probe(&mbox_driver, mbox_probe);
+}
+
+module_init(mbox_init);
+
+void __exit mbox_exit(void)
+{
+ platform_driver_unregister(&mbox_driver);
+}
+
+module_exit(mbox_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MBOX driver");
diff --git a/arch/arm/mach-ux500/modem_irq.c b/arch/arm/mach-ux500/modem_irq.c
new file mode 100644
index 0000000..3187f88
--- /dev/null
+++ b/arch/arm/mach-ux500/modem_irq.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson.
+ * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define MODEM_INTCON_BASE_ADDR 0xBFFD3000
+#define MODEM_INTCON_SIZE 0xFFF
+
+#define DEST_IRQ41_OFFSET 0x2A4
+#define DEST_IRQ43_OFFSET 0x2AC
+#define DEST_IRQ45_OFFSET 0x2B4
+
+#define PRIO_IRQ41_OFFSET 0x6A4
+#define PRIO_IRQ43_OFFSET 0x6AC
+#define PRIO_IRQ45_OFFSET 0x6B4
+
+#define ALLOW_IRQ_OFFSET 0x104
+
+#define MODEM_INTCON_CPU_NBR 0x1
+#define MODEM_INTCON_PRIO_HIGH 0x0
+
+#define MODEM_INTCON_ALLOW_IRQ41 0x0200
+#define MODEM_INTCON_ALLOW_IRQ43 0x0800
+#define MODEM_INTCON_ALLOW_IRQ45 0x2000
+
+#define MODEM_IRQ_REG_OFFSET 0x4
+
+struct modem_irq {
+ void __iomem *modem_intcon_base;
+};
+
+
+static void setup_modem_intcon(void __iomem *modem_intcon_base)
+{
+ /* IC_DESTINATION_BASE_ARRAY - Which CPU to receive the IRQ */
+ writel(MODEM_INTCON_CPU_NBR, modem_intcon_base + DEST_IRQ41_OFFSET);
+ writel(MODEM_INTCON_CPU_NBR, modem_intcon_base + DEST_IRQ43_OFFSET);
+ writel(MODEM_INTCON_CPU_NBR, modem_intcon_base + DEST_IRQ45_OFFSET);
+
+ /* IC_PRIORITY_BASE_ARRAY - IRQ priority in modem IRQ controller */
+ writel(MODEM_INTCON_PRIO_HIGH, modem_intcon_base + PRIO_IRQ41_OFFSET);
+ writel(MODEM_INTCON_PRIO_HIGH, modem_intcon_base + PRIO_IRQ43_OFFSET);
+ writel(MODEM_INTCON_PRIO_HIGH, modem_intcon_base + PRIO_IRQ45_OFFSET);
+
+ /* IC_ALLOW_ARRAY - IRQ enable */
+ writel(MODEM_INTCON_ALLOW_IRQ41 |
+ MODEM_INTCON_ALLOW_IRQ43 |
+ MODEM_INTCON_ALLOW_IRQ45,
+ modem_intcon_base + ALLOW_IRQ_OFFSET);
+}
+
+static irqreturn_t modem_cpu_irq_handler(int irq, void *data)
+{
+ int real_irq;
+ int virt_irq;
+ struct modem_irq *mi = (struct modem_irq *)data;
+
+ /* Read modem side IRQ number from modem IRQ controller */
+ real_irq = readl(mi->modem_intcon_base + MODEM_IRQ_REG_OFFSET) & 0xFF;
+ virt_irq = IRQ_MODEM_EVENTS_BASE + real_irq;
+
+ pr_debug("modem_irq: Worker read addr 0x%X and got value 0x%X "
+ "which will be 0x%X (%d) which translates to "
+ "virtual IRQ 0x%X (%d)!\n",
+ (u32)mi->modem_intcon_base + MODEM_IRQ_REG_OFFSET,
+ real_irq,
+ real_irq & 0xFF,
+ real_irq & 0xFF,
+ virt_irq,
+ virt_irq);
+
+ if (virt_irq != 0)
+ generic_handle_irq(virt_irq);
+
+ pr_debug("modem_irq: Done handling virtual IRQ %d!\n", virt_irq);
+
+ return IRQ_HANDLED;
+}
+
+static void create_virtual_irq(int irq, struct irq_chip *modem_irq_chip)
+{
+ set_irq_chip(irq, modem_irq_chip);
+ set_irq_handler(irq, handle_simple_irq);
+ set_irq_flags(irq, IRQF_VALID);
+
+ pr_debug("modem_irq: Created virtual IRQ %d\n", irq);
+}
+
+static int modem_irq_init(void)
+{
+ int err;
+ static struct irq_chip modem_irq_chip;
+ struct modem_irq *mi;
+
+ pr_info("modem_irq: Set up IRQ handler for incoming modem IRQ %d\n",
+ IRQ_DB5500_MODEM);
+
+ mi = kmalloc(sizeof(struct modem_irq), GFP_KERNEL);
+ if (!mi) {
+ pr_err("modem_irq: Could not allocate device\n");
+ return -ENOMEM;
+ }
+
+ mi->modem_intcon_base =
+ ioremap(MODEM_INTCON_BASE_ADDR, MODEM_INTCON_SIZE);
+ if (!mi->modem_intcon_base) {
+ pr_err("modem_irq: Unable to ioremap modem interrupt controller\n");
+ kfree(mi);
+ return -ENOMEM;
+ }
+ pr_debug("modem_irq: ioremapped modem_intcon_base from "
+ "phy 0x%x to virt 0x%x\n", MODEM_INTCON_BASE_ADDR,
+ (u32)mi->modem_intcon_base);
+
+ setup_modem_intcon(mi->modem_intcon_base);
+
+ modem_irq_chip = dummy_irq_chip;
+ modem_irq_chip.name = "modem_irq";
+
+ /* Create the virtual IRQs needed */
+ create_virtual_irq(MBOX_PAIR0_VIRT_IRQ, &modem_irq_chip);
+ create_virtual_irq(MBOX_PAIR1_VIRT_IRQ, &modem_irq_chip);
+ create_virtual_irq(MBOX_PAIR2_VIRT_IRQ, &modem_irq_chip);
+
+ err = request_threaded_irq(IRQ_DB5500_MODEM, NULL,
+ modem_cpu_irq_handler, IRQF_ONESHOT,
+ "modem_irq", mi);
+ if (err) {
+ pr_err("modem_irq: Could not register IRQ %d\n",
+ IRQ_DB5500_MODEM);
+ iounmap(mi->modem_intcon_base);
+ kfree(mi);
+ return err;
+ }
+
+ return 0;
+}
+
+arch_initcall(modem_irq_init);
diff --git a/arch/arm/mach-ux500/pins-db5500.h b/arch/arm/mach-ux500/pins-db5500.h
new file mode 100644
index 0000000..bf50c21
--- /dev/null
+++ b/arch/arm/mach-ux500/pins-db5500.h
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License, version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+ */
+
+#ifndef __MACH_DB5500_PINS_H
+#define __MACH_DB5500_PINS_H
+
+#define GPIO0_GPIO PIN_CFG(0, GPIO)
+#define GPIO0_SM_CS3n PIN_CFG(0, ALT_A)
+
+#define GPIO1_GPIO PIN_CFG(1, GPIO)
+#define GPIO1_SM_A3 PIN_CFG(1, ALT_A)
+
+#define GPIO2_GPIO PIN_CFG(2, GPIO)
+#define GPIO2_SM_A4 PIN_CFG(2, ALT_A)
+#define GPIO2_SM_AVD PIN_CFG(2, ALT_B)
+
+#define GPIO3_GPIO PIN_CFG(3, GPIO)
+#define GPIO3_I2C1_SCL PIN_CFG(3, ALT_A)
+
+#define GPIO4_GPIO PIN_CFG(4, GPIO)
+#define GPIO4_I2C1_SDA PIN_CFG(4, ALT_A)
+
+#define GPIO5_GPIO PIN_CFG(5, GPIO)
+#define GPIO5_MC0_DAT0 PIN_CFG(5, ALT_A)
+#define GPIO5_SM_ADQ8 PIN_CFG(5, ALT_B)
+
+#define GPIO6_GPIO PIN_CFG(6, GPIO)
+#define GPIO6_MC0_DAT1 PIN_CFG(6, ALT_A)
+#define GPIO6_SM_ADQ0 PIN_CFG(6, ALT_B)
+
+#define GPIO7_GPIO PIN_CFG(7, GPIO)
+#define GPIO7_MC0_DAT2 PIN_CFG(7, ALT_A)
+#define GPIO7_SM_ADQ9 PIN_CFG(7, ALT_B)
+
+#define GPIO8_GPIO PIN_CFG(8, GPIO)
+#define GPIO8_MC0_DAT3 PIN_CFG(8, ALT_A)
+#define GPIO8_SM_ADQ1 PIN_CFG(8, ALT_B)
+
+#define GPIO9_GPIO PIN_CFG(9, GPIO)
+#define GPIO9_MC0_DAT4 PIN_CFG(9, ALT_A)
+#define GPIO9_SM_ADQ10 PIN_CFG(9, ALT_B)
+
+#define GPIO10_GPIO PIN_CFG(10, GPIO)
+#define GPIO10_MC0_DAT5 PIN_CFG(10, ALT_A)
+#define GPIO10_SM_ADQ2 PIN_CFG(10, ALT_B)
+
+#define GPIO11_GPIO PIN_CFG(11, GPIO)
+#define GPIO11_MC0_DAT6 PIN_CFG(11, ALT_A)
+#define GPIO11_SM_ADQ11 PIN_CFG(11, ALT_B)
+
+#define GPIO12_GPIO PIN_CFG(12, GPIO)
+#define GPIO12_MC0_DAT7 PIN_CFG(12, ALT_A)
+#define GPIO12_SM_ADQ3 PIN_CFG(12, ALT_B)
+
+#define GPIO13_GPIO PIN_CFG(13, GPIO)
+#define GPIO13_MC0_CMD PIN_CFG(13, ALT_A)
+#define GPIO13_SM_BUSY0n PIN_CFG(13, ALT_B)
+#define GPIO13_SM_WAIT0n PIN_CFG(13, ALT_C)
+
+#define GPIO14_GPIO PIN_CFG(14, GPIO)
+#define GPIO14_MC0_CLK PIN_CFG(14, ALT_A)
+#define GPIO14_SM_CS1n PIN_CFG(14, ALT_B)
+#define GPIO14_SM_CKO PIN_CFG(14, ALT_C)
+
+#define GPIO15_GPIO PIN_CFG(15, GPIO)
+#define GPIO15_SM_A5 PIN_CFG(15, ALT_A)
+#define GPIO15_SM_CLE PIN_CFG(15, ALT_B)
+
+#define GPIO16_GPIO PIN_CFG(16, GPIO)
+#define GPIO16_MC2_CMD PIN_CFG(16, ALT_A)
+#define GPIO16_SM_OEn PIN_CFG(16, ALT_B)
+
+#define GPIO17_GPIO PIN_CFG(17, GPIO)
+#define GPIO17_MC2_CLK PIN_CFG(17, ALT_A)
+#define GPIO17_SM_WEn PIN_CFG(17, ALT_B)
+
+#define GPIO18_GPIO PIN_CFG(18, GPIO)
+#define GPIO18_SM_A6 PIN_CFG(18, ALT_A)
+#define GPIO18_SM_ALE PIN_CFG(18, ALT_B)
+#define GPIO18_SM_AVDn PIN_CFG(18, ALT_C)
+
+#define GPIO19_GPIO PIN_CFG(19, GPIO)
+#define GPIO19_MC2_DAT1 PIN_CFG(19, ALT_A)
+#define GPIO19_SM_ADQ4 PIN_CFG(19, ALT_B)
+
+#define GPIO20_GPIO PIN_CFG(20, GPIO)
+#define GPIO20_MC2_DAT3 PIN_CFG(20, ALT_A)
+#define GPIO20_SM_ADQ5 PIN_CFG(20, ALT_B)
+
+#define GPIO21_GPIO PIN_CFG(21, GPIO)
+#define GPIO21_MC2_DAT5 PIN_CFG(21, ALT_A)
+#define GPIO21_SM_ADQ6 PIN_CFG(21, ALT_B)
+
+#define GPIO22_GPIO PIN_CFG(22, GPIO)
+#define GPIO22_MC2_DAT7 PIN_CFG(22, ALT_A)
+#define GPIO22_SM_ADQ7 PIN_CFG(22, ALT_B)
+
+#define GPIO23_GPIO PIN_CFG(23, GPIO)
+#define GPIO23_MC2_DAT0 PIN_CFG(23, ALT_A)
+#define GPIO23_SM_ADQ12 PIN_CFG(23, ALT_B)
+#define GPIO23_MC0_DAT1 PIN_CFG(23, ALT_C)
+
+#define GPIO24_GPIO PIN_CFG(24, GPIO)
+#define GPIO24_MC2_DAT2 PIN_CFG(24, ALT_A)
+#define GPIO24_SM_ADQ13 PIN_CFG(24, ALT_B)
+#define GPIO24_MC0_DAT3 PIN_CFG(24, ALT_C)
+
+#define GPIO25_GPIO PIN_CFG(25, GPIO)
+#define GPIO25_MC2_DAT4 PIN_CFG(25, ALT_A)
+#define GPIO25_SM_ADQ14 PIN_CFG(25, ALT_B)
+#define GPIO25_MC0_CMD PIN_CFG(25, ALT_C)
+
+#define GPIO26_GPIO PIN_CFG(26, GPIO)
+#define GPIO26_MC2_DAT6 PIN_CFG(26, ALT_A)
+#define GPIO26_SM_ADQ15 PIN_CFG(26, ALT_B)
+
+#define GPIO27_GPIO PIN_CFG(27, GPIO)
+#define GPIO27_SM_CS0n PIN_CFG(27, ALT_A)
+#define GPIO27_SM_PS0n PIN_CFG(27, ALT_B)
+
+#define GPIO28_GPIO PIN_CFG(28, GPIO)
+#define GPIO28_U0_TXD PIN_CFG(28, ALT_A)
+#define GPIO28_SM_A0 PIN_CFG(28, ALT_B)
+
+#define GPIO29_GPIO PIN_CFG(29, GPIO)
+#define GPIO29_U0_RXD PIN_CFG(29, ALT_A)
+#define GPIO29_SM_A1 PIN_CFG(29, ALT_B)
+#define GPIO29_PWM_0 PIN_CFG(29, ALT_C)
+
+#define GPIO30_GPIO PIN_CFG(30, GPIO)
+#define GPIO30_MC0_DAT5 PIN_CFG(30, ALT_A)
+#define GPIO30_SM_A2 PIN_CFG(30, ALT_B)
+#define GPIO30_PWM_1 PIN_CFG(30, ALT_C)
+
+#define GPIO31_GPIO PIN_CFG(31, GPIO)
+#define GPIO31_MC0_DAT7 PIN_CFG(31, ALT_A)
+#define GPIO31_SM_CS2n PIN_CFG(31, ALT_B)
+#define GPIO31_PWM_2 PIN_CFG(31, ALT_C)
+
+#define GPIO32_GPIO PIN_CFG(32, GPIO)
+#define GPIO32_MSP0_TCK PIN_CFG(32, ALT_A)
+#define GPIO32_ACCI2S0_SCK PIN_CFG(32, ALT_B)
+
+#define GPIO33_GPIO PIN_CFG(33, GPIO)
+#define GPIO33_MSP0_TFS PIN_CFG(33, ALT_A)
+#define GPIO33_ACCI2S0_WS PIN_CFG(33, ALT_B)
+
+#define GPIO34_GPIO PIN_CFG(34, GPIO)
+#define GPIO34_MSP0_TXD PIN_CFG(34, ALT_A)
+#define GPIO34_ACCI2S0_DLD PIN_CFG(34, ALT_B)
+
+#define GPIO35_GPIO PIN_CFG(35, GPIO)
+#define GPIO35_MSP0_RXD PIN_CFG(35, ALT_A)
+#define GPIO35_ACCI2S0_ULD PIN_CFG(35, ALT_B)
+
+#define GPIO64_GPIO PIN_CFG(64, GPIO)
+#define GPIO64_USB_DAT0 PIN_CFG(64, ALT_A)
+#define GPIO64_U0_TXD PIN_CFG(64, ALT_B)
+
+#define GPIO65_GPIO PIN_CFG(65, GPIO)
+#define GPIO65_USB_DAT1 PIN_CFG(65, ALT_A)
+#define GPIO65_U0_RXD PIN_CFG(65, ALT_B)
+
+#define GPIO66_GPIO PIN_CFG(66, GPIO)
+#define GPIO66_USB_DAT2 PIN_CFG(66, ALT_A)
+
+#define GPIO67_GPIO PIN_CFG(67, GPIO)
+#define GPIO67_USB_DAT3 PIN_CFG(67, ALT_A)
+
+#define GPIO68_GPIO PIN_CFG(68, GPIO)
+#define GPIO68_USB_DAT4 PIN_CFG(68, ALT_A)
+
+#define GPIO69_GPIO PIN_CFG(69, GPIO)
+#define GPIO69_USB_DAT5 PIN_CFG(69, ALT_A)
+
+#define GPIO70_GPIO PIN_CFG(70, GPIO)
+#define GPIO70_USB_DAT6 PIN_CFG(70, ALT_A)
+
+#define GPIO71_GPIO PIN_CFG(71, GPIO)
+#define GPIO71_USB_DAT7 PIN_CFG(71, ALT_A)
+
+#define GPIO72_GPIO PIN_CFG(72, GPIO)
+#define GPIO72_USB_STP PIN_CFG(72, ALT_A)
+
+#define GPIO73_GPIO PIN_CFG(73, GPIO)
+#define GPIO73_USB_DIR PIN_CFG(73, ALT_A)
+
+#define GPIO74_GPIO PIN_CFG(74, GPIO)
+#define GPIO74_USB_NXT PIN_CFG(74, ALT_A)
+
+#define GPIO75_GPIO PIN_CFG(75, GPIO)
+#define GPIO75_USB_XCLK PIN_CFG(75, ALT_A)
+
+#define GPIO76_GPIO PIN_CFG(76, GPIO)
+
+#define GPIO77_GPIO PIN_CFG(77, GPIO)
+#define GPIO77_ACCTX_ON PIN_CFG(77, ALT_A)
+
+#define GPIO78_GPIO PIN_CFG(78, GPIO)
+#define GPIO78_IRQn PIN_CFG(78, ALT_A)
+
+#define GPIO79_GPIO PIN_CFG(79, GPIO)
+#define GPIO79_ACCSIM_Clk PIN_CFG(79, ALT_A)
+
+#define GPIO80_GPIO PIN_CFG(80, GPIO)
+#define GPIO80_ACCSIM_Da PIN_CFG(80, ALT_A)
+
+#define GPIO81_GPIO PIN_CFG(81, GPIO)
+#define GPIO81_ACCSIM_Reset PIN_CFG(81, ALT_A)
+
+#define GPIO82_GPIO PIN_CFG(82, GPIO)
+#define GPIO82_ACCSIM_DDir PIN_CFG(82, ALT_A)
+
+#define GPIO96_GPIO PIN_CFG(96, GPIO)
+#define GPIO96_MSP1_TCK PIN_CFG(96, ALT_A)
+#define GPIO96_PRCMU_DEBUG3 PIN_CFG(96, ALT_B)
+#define GPIO96_PRCMU_DEBUG7 PIN_CFG(96, ALT_C)
+
+#define GPIO97_GPIO PIN_CFG(97, GPIO)
+#define GPIO97_MSP1_TFS PIN_CFG(97, ALT_A)
+#define GPIO97_PRCMU_DEBUG2 PIN_CFG(97, ALT_B)
+#define GPIO97_PRCMU_DEBUG6 PIN_CFG(97, ALT_C)
+
+#define GPIO98_GPIO PIN_CFG(98, GPIO)
+#define GPIO98_MSP1_TXD PIN_CFG(98, ALT_A)
+#define GPIO98_PRCMU_DEBUG1 PIN_CFG(98, ALT_B)
+#define GPIO98_PRCMU_DEBUG5 PIN_CFG(98, ALT_C)
+
+#define GPIO99_GPIO PIN_CFG(99, GPIO)
+#define GPIO99_MSP1_RXD PIN_CFG(99, ALT_A)
+#define GPIO99_PRCMU_DEBUG0 PIN_CFG(99, ALT_B)
+#define GPIO99_PRCMU_DEBUG4 PIN_CFG(99, ALT_C)
+
+#define GPIO100_GPIO PIN_CFG(100, GPIO)
+#define GPIO100_I2C0_SCL PIN_CFG(100, ALT_A)
+
+#define GPIO101_GPIO PIN_CFG(101, GPIO)
+#define GPIO101_I2C0_SDA PIN_CFG(101, ALT_A)
+
+#define GPIO128_GPIO PIN_CFG(128, GPIO)
+#define GPIO128_KP_I0 PIN_CFG(128, ALT_A)
+#define GPIO128_BUSMON_D0 PIN_CFG(128, ALT_B)
+
+#define GPIO129_GPIO PIN_CFG(129, GPIO)
+#define GPIO129_KP_O0 PIN_CFG(129, ALT_A)
+#define GPIO129_BUSMON_D1 PIN_CFG(129, ALT_B)
+
+#define GPIO130_GPIO PIN_CFG(130, GPIO)
+#define GPIO130_KP_I1 PIN_CFG(130, ALT_A)
+#define GPIO130_BUSMON_D2 PIN_CFG(130, ALT_B)
+
+#define GPIO131_GPIO PIN_CFG(131, GPIO)
+#define GPIO131_KP_O1 PIN_CFG(131, ALT_A)
+#define GPIO131_BUSMON_D3 PIN_CFG(131, ALT_B)
+
+#define GPIO132_GPIO PIN_CFG(132, GPIO)
+#define GPIO132_KP_I2 PIN_CFG(132, ALT_A)
+#define GPIO132_ETM_D15 PIN_CFG(132, ALT_B)
+#define GPIO132_STMAPE_CLK PIN_CFG(132, ALT_C)
+
+#define GPIO133_GPIO PIN_CFG(133, GPIO)
+#define GPIO133_KP_O2 PIN_CFG(133, ALT_A)
+#define GPIO133_ETM_D14 PIN_CFG(133, ALT_B)
+#define GPIO133_U0_RXD PIN_CFG(133, ALT_C)
+
+#define GPIO134_GPIO PIN_CFG(134, GPIO)
+#define GPIO134_KP_I3 PIN_CFG(134, ALT_A)
+#define GPIO134_ETM_D13 PIN_CFG(134, ALT_B)
+#define GPIO134_STMAPE_DAT0 PIN_CFG(134, ALT_C)
+
+#define GPIO135_GPIO PIN_CFG(135, GPIO)
+#define GPIO135_KP_O3 PIN_CFG(135, ALT_A)
+#define GPIO135_ETM_D12 PIN_CFG(135, ALT_B)
+#define GPIO135_STMAPE_DAT1 PIN_CFG(135, ALT_C)
+
+#define GPIO136_GPIO PIN_CFG(136, GPIO)
+#define GPIO136_KP_I4 PIN_CFG(136, ALT_A)
+#define GPIO136_ETM_D11 PIN_CFG(136, ALT_B)
+#define GPIO136_STMAPE_DAT2 PIN_CFG(136, ALT_C)
+
+#define GPIO137_GPIO PIN_CFG(137, GPIO)
+#define GPIO137_KP_O4 PIN_CFG(137, ALT_A)
+#define GPIO137_ETM_D10 PIN_CFG(137, ALT_B)
+#define GPIO137_STMAPE_DAT3 PIN_CFG(137, ALT_C)
+
+#define GPIO138_GPIO PIN_CFG(138, GPIO)
+#define GPIO138_KP_I5 PIN_CFG(138, ALT_A)
+#define GPIO138_ETM_D9 PIN_CFG(138, ALT_B)
+#define GPIO138_U0_TXD PIN_CFG(138, ALT_C)
+
+#define GPIO139_GPIO PIN_CFG(139, GPIO)
+#define GPIO139_KP_O5 PIN_CFG(139, ALT_A)
+#define GPIO139_ETM_D8 PIN_CFG(139, ALT_B)
+#define GPIO139_BUSMON_D11 PIN_CFG(139, ALT_C)
+
+#define GPIO140_GPIO PIN_CFG(140, GPIO)
+#define GPIO140_KP_I6 PIN_CFG(140, ALT_A)
+#define GPIO140_ETM_D7 PIN_CFG(140, ALT_B)
+#define GPIO140_STMAPE_CLK PIN_CFG(140, ALT_C)
+
+#define GPIO141_GPIO PIN_CFG(141, GPIO)
+#define GPIO141_KP_O6 PIN_CFG(141, ALT_A)
+#define GPIO141_ETM_D6 PIN_CFG(141, ALT_B)
+#define GPIO141_U0_RXD PIN_CFG(141, ALT_C)
+
+#define GPIO142_GPIO PIN_CFG(142, GPIO)
+#define GPIO142_KP_I7 PIN_CFG(142, ALT_A)
+#define GPIO142_ETM_D5 PIN_CFG(142, ALT_B)
+#define GPIO142_STMAPE_DAT0 PIN_CFG(142, ALT_C)
+
+#define GPIO143_GPIO PIN_CFG(143, GPIO)
+#define GPIO143_KP_O7 PIN_CFG(143, ALT_A)
+#define GPIO143_ETM_D4 PIN_CFG(143, ALT_B)
+#define GPIO143_STMAPE_DAT1 PIN_CFG(143, ALT_C)
+
+#define GPIO144_GPIO PIN_CFG(144, GPIO)
+#define GPIO144_I2C3_SCL PIN_CFG(144, ALT_A)
+#define GPIO144_ETM_D3 PIN_CFG(144, ALT_B)
+#define GPIO144_STMAPE_DAT2 PIN_CFG(144, ALT_C)
+
+#define GPIO145_GPIO PIN_CFG(145, GPIO)
+#define GPIO145_I2C3_SDA PIN_CFG(145, ALT_A)
+#define GPIO145_ETM_D2 PIN_CFG(145, ALT_B)
+#define GPIO145_STMAPE_DAT3 PIN_CFG(145, ALT_C)
+
+#define GPIO146_GPIO PIN_CFG(146, GPIO)
+#define GPIO146_PWM_0 PIN_CFG(146, ALT_A)
+#define GPIO146_ETM_D1 PIN_CFG(146, ALT_B)
+
+#define GPIO147_GPIO PIN_CFG(147, GPIO)
+#define GPIO147_PWM_1 PIN_CFG(147, ALT_A)
+#define GPIO147_ETM_D0 PIN_CFG(147, ALT_B)
+
+#define GPIO148_GPIO PIN_CFG(148, GPIO)
+#define GPIO148_PWM_2 PIN_CFG(148, ALT_A)
+#define GPIO148_ETM_CLK PIN_CFG(148, ALT_B)
+
+#define GPIO160_GPIO PIN_CFG(160, GPIO)
+#define GPIO160_CLKOUT_REQn PIN_CFG(160, ALT_A)
+
+#define GPIO161_GPIO PIN_CFG(161, GPIO)
+#define GPIO161_CLKOUT_0 PIN_CFG(161, ALT_A)
+
+#define GPIO162_GPIO PIN_CFG(162, GPIO)
+#define GPIO162_CLKOUT_1 PIN_CFG(162, ALT_A)
+
+#define GPIO163_GPIO PIN_CFG(163, GPIO)
+
+#define GPIO164_GPIO PIN_CFG(164, GPIO)
+#define GPIO164_GPS_START PIN_CFG(164, ALT_A)
+
+#define GPIO165_GPIO PIN_CFG(165, GPIO)
+#define GPIO165_SPI1_CS2n PIN_CFG(165, ALT_A)
+#define GPIO165_U3_RXD PIN_CFG(165, ALT_B)
+#define GPIO165_BUSMON_D20 PIN_CFG(165, ALT_C)
+
+#define GPIO166_GPIO PIN_CFG(166, GPIO)
+#define GPIO166_SPI1_CS1n PIN_CFG(166, ALT_A)
+#define GPIO166_U3_TXD PIN_CFG(166, ALT_B)
+#define GPIO166_BUSMON_D21 PIN_CFG(166, ALT_C)
+
+#define GPIO167_GPIO PIN_CFG(167, GPIO)
+#define GPIO167_SPI1_CS0n PIN_CFG(167, ALT_A)
+#define GPIO167_U3_RTSn PIN_CFG(167, ALT_B)
+#define GPIO167_BUSMON_D22 PIN_CFG(167, ALT_C)
+
+#define GPIO168_GPIO PIN_CFG(168, GPIO)
+#define GPIO168_SPI1_RXD PIN_CFG(168, ALT_A)
+#define GPIO168_U3_CTSn PIN_CFG(168, ALT_B)
+#define GPIO168_BUSMON_D23 PIN_CFG(168, ALT_C)
+
+#define GPIO169_GPIO PIN_CFG(169, GPIO)
+#define GPIO169_SPI1_TXD PIN_CFG(169, ALT_A)
+#define GPIO169_DDR_RC PIN_CFG(169, ALT_B)
+#define GPIO169_BUSMON_D24 PIN_CFG(169, ALT_C)
+
+#define GPIO170_GPIO PIN_CFG(170, GPIO)
+#define GPIO170_SPI1_CLK PIN_CFG(170, ALT_A)
+
+#define GPIO171_GPIO PIN_CFG(171, GPIO)
+#define GPIO171_MC3_DAT0 PIN_CFG(171, ALT_A)
+#define GPIO171_SPI3_RXD PIN_CFG(171, ALT_B)
+#define GPIO171_BUSMON_D25 PIN_CFG(171, ALT_C)
+
+#define GPIO172_GPIO PIN_CFG(172, GPIO)
+#define GPIO172_MC3_DAT1 PIN_CFG(172, ALT_A)
+#define GPIO172_SPI3_CS1n PIN_CFG(172, ALT_B)
+#define GPIO172_BUSMON_D26 PIN_CFG(172, ALT_C)
+
+#define GPIO173_GPIO PIN_CFG(173, GPIO)
+#define GPIO173_MC3_DAT2 PIN_CFG(173, ALT_A)
+#define GPIO173_SPI3_CS2n PIN_CFG(173, ALT_B)
+#define GPIO173_BUSMON_D27 PIN_CFG(173, ALT_C)
+
+#define GPIO174_GPIO PIN_CFG(174, GPIO)
+#define GPIO174_MC3_DAT3 PIN_CFG(174, ALT_A)
+#define GPIO174_SPI3_CS0n PIN_CFG(174, ALT_B)
+#define GPIO174_BUSMON_D28 PIN_CFG(174, ALT_C)
+
+#define GPIO175_GPIO PIN_CFG(175, GPIO)
+#define GPIO175_MC3_CMD PIN_CFG(175, ALT_A)
+#define GPIO175_SPI3_TXD PIN_CFG(175, ALT_B)
+#define GPIO175_BUSMON_D29 PIN_CFG(175, ALT_C)
+
+#define GPIO176_GPIO PIN_CFG(176, GPIO)
+#define GPIO176_MC3_CLK PIN_CFG(176, ALT_A)
+#define GPIO176_SPI3_CLK PIN_CFG(176, ALT_B)
+
+#define GPIO177_GPIO PIN_CFG(177, GPIO)
+#define GPIO177_U2_RXD PIN_CFG(177, ALT_A)
+#define GPIO177_I2C3_SCL PIN_CFG(177, ALT_B)
+#define GPIO177_BUSMON_D30 PIN_CFG(177, ALT_C)
+
+#define GPIO178_GPIO PIN_CFG(178, GPIO)
+#define GPIO178_U2_TXD PIN_CFG(178, ALT_A)
+#define GPIO178_I2C3_SDA PIN_CFG(178, ALT_B)
+#define GPIO178_BUSMON_D31 PIN_CFG(178, ALT_C)
+
+#define GPIO179_GPIO PIN_CFG(179, GPIO)
+#define GPIO179_U2_CTSn PIN_CFG(179, ALT_A)
+#define GPIO179_U3_RXD PIN_CFG(179, ALT_B)
+#define GPIO179_BUSMON_D32 PIN_CFG(179, ALT_C)
+
+#define GPIO180_GPIO PIN_CFG(180, GPIO)
+#define GPIO180_U2_RTSn PIN_CFG(180, ALT_A)
+#define GPIO180_U3_TXD PIN_CFG(180, ALT_B)
+#define GPIO180_BUSMON_D33 PIN_CFG(180, ALT_C)
+
+#define GPIO185_GPIO PIN_CFG(185, GPIO)
+#define GPIO185_SPI3_CS2n PIN_CFG(185, ALT_A)
+#define GPIO185_MC4_DAT0 PIN_CFG(185, ALT_B)
+
+#define GPIO186_GPIO PIN_CFG(186, GPIO)
+#define GPIO186_SPI3_CS1n PIN_CFG(186, ALT_A)
+#define GPIO186_MC4_DAT1 PIN_CFG(186, ALT_B)
+
+#define GPIO187_GPIO PIN_CFG(187, GPIO)
+#define GPIO187_SPI3_CS0n PIN_CFG(187, ALT_A)
+#define GPIO187_MC4_DAT2 PIN_CFG(187, ALT_B)
+
+#define GPIO188_GPIO PIN_CFG(188, GPIO)
+#define GPIO188_SPI3_RXD PIN_CFG(188, ALT_A)
+#define GPIO188_MC4_DAT3 PIN_CFG(188, ALT_B)
+
+#define GPIO189_GPIO PIN_CFG(189, GPIO)
+#define GPIO189_SPI3_TXD PIN_CFG(189, ALT_A)
+#define GPIO189_MC4_CMD PIN_CFG(189, ALT_B)
+
+#define GPIO190_GPIO PIN_CFG(190, GPIO)
+#define GPIO190_SPI3_CLK PIN_CFG(190, ALT_A)
+#define GPIO190_MC4_CLK PIN_CFG(190, ALT_B)
+
+#define GPIO191_GPIO PIN_CFG(191, GPIO)
+#define GPIO191_MC1_DAT0 PIN_CFG(191, ALT_A)
+#define GPIO191_MC4_DAT4 PIN_CFG(191, ALT_B)
+#define GPIO191_STMAPE_DAT0 PIN_CFG(191, ALT_C)
+
+#define GPIO192_GPIO PIN_CFG(192, GPIO)
+#define GPIO192_MC1_DAT1 PIN_CFG(192, ALT_A)
+#define GPIO192_MC4_DAT5 PIN_CFG(192, ALT_B)
+#define GPIO192_STMAPE_DAT1 PIN_CFG(192, ALT_C)
+
+#define GPIO193_GPIO PIN_CFG(193, GPIO)
+#define GPIO193_MC1_DAT2 PIN_CFG(193, ALT_A)
+#define GPIO193_MC4_DAT6 PIN_CFG(193, ALT_B)
+#define GPIO193_STMAPE_DAT2 PIN_CFG(193, ALT_C)
+
+#define GPIO194_GPIO PIN_CFG(194, GPIO)
+#define GPIO194_MC1_DAT3 PIN_CFG(194, ALT_A)
+#define GPIO194_MC4_DAT7 PIN_CFG(194, ALT_B)
+#define GPIO194_STMAPE_DAT3 PIN_CFG(194, ALT_C)
+
+#define GPIO195_GPIO PIN_CFG(195, GPIO)
+#define GPIO195_MC1_CLK PIN_CFG(195, ALT_A)
+#define GPIO195_STMAPE_CLK PIN_CFG(195, ALT_B)
+#define GPIO195_BUSMON_CLK PIN_CFG(195, ALT_C)
+
+#define GPIO196_GPIO PIN_CFG(196, GPIO)
+#define GPIO196_MC1_CMD PIN_CFG(196, ALT_A)
+#define GPIO196_U0_RXD PIN_CFG(196, ALT_B)
+#define GPIO196_BUSMON_D38 PIN_CFG(196, ALT_C)
+
+#define GPIO197_GPIO PIN_CFG(197, GPIO)
+#define GPIO197_MC1_CMDDIR PIN_CFG(197, ALT_A)
+#define GPIO197_BUSMON_D39 PIN_CFG(197, ALT_B)
+
+#define GPIO198_GPIO PIN_CFG(198, GPIO)
+#define GPIO198_MC1_FBCLK PIN_CFG(198, ALT_A)
+
+#define GPIO199_GPIO PIN_CFG(199, GPIO)
+#define GPIO199_MC1_DAT0DIR PIN_CFG(199, ALT_A)
+#define GPIO199_BUSMON_D40 PIN_CFG(199, ALT_B)
+
+#define GPIO200_GPIO PIN_CFG(200, GPIO)
+#define GPIO200_U1_TXD PIN_CFG(200, ALT_A)
+#define GPIO200_ACCU0_RTSn PIN_CFG(200, ALT_B)
+
+#define GPIO201_GPIO PIN_CFG(201, GPIO)
+#define GPIO201_U1_RXD PIN_CFG(201, ALT_A)
+#define GPIO201_ACCU0_CTSn PIN_CFG(201, ALT_B)
+
+#define GPIO202_GPIO PIN_CFG(202, GPIO)
+#define GPIO202_U1_CTSn PIN_CFG(202, ALT_A)
+#define GPIO202_ACCU0_RXD PIN_CFG(202, ALT_B)
+
+#define GPIO203_GPIO PIN_CFG(203, GPIO)
+#define GPIO203_U1_RTSn PIN_CFG(203, ALT_A)
+#define GPIO203_ACCU0_TXD PIN_CFG(203, ALT_B)
+
+#define GPIO204_GPIO PIN_CFG(204, GPIO)
+#define GPIO204_SPI0_CS2n PIN_CFG(204, ALT_A)
+#define GPIO204_ACCGPIO_000 PIN_CFG(204, ALT_B)
+#define GPIO204_LCD_VSI1 PIN_CFG(204, ALT_C)
+
+#define GPIO205_GPIO PIN_CFG(205, GPIO)
+#define GPIO205_SPI0_CS1n PIN_CFG(205, ALT_A)
+#define GPIO205_ACCGPIO_001 PIN_CFG(205, ALT_B)
+#define GPIO205_LCD_D3 PIN_CFG(205, ALT_C)
+
+#define GPIO206_GPIO PIN_CFG(206, GPIO)
+#define GPIO206_SPI0_CS0n PIN_CFG(206, ALT_A)
+#define GPIO206_ACCGPIO_002 PIN_CFG(206, ALT_B)
+#define GPIO206_LCD_D2 PIN_CFG(206, ALT_C)
+
+#define GPIO207_GPIO PIN_CFG(207, GPIO)
+#define GPIO207_SPI0_RXD PIN_CFG(207, ALT_A)
+#define GPIO207_ACCGPIO_003 PIN_CFG(207, ALT_B)
+#define GPIO207_LCD_D1 PIN_CFG(207, ALT_C)
+
+#define GPIO208_GPIO PIN_CFG(208, GPIO)
+#define GPIO208_SPI0_TXD PIN_CFG(208, ALT_A)
+#define GPIO208_ACCGPIO_004 PIN_CFG(208, ALT_B)
+#define GPIO208_LCD_D0 PIN_CFG(208, ALT_C)
+
+#define GPIO209_GPIO PIN_CFG(209, GPIO)
+#define GPIO209_SPI0_CLK PIN_CFG(209, ALT_A)
+#define GPIO209_ACCGPIO_005 PIN_CFG(209, ALT_B)
+#define GPIO209_LCD_CLK PIN_CFG(209, ALT_C)
+
+#define GPIO210_GPIO PIN_CFG(210, GPIO)
+#define GPIO210_LCD_VSO PIN_CFG(210, ALT_A)
+#define GPIO210_PRCMU_PWRCTRL1 PIN_CFG(210, ALT_B)
+
+#define GPIO211_GPIO PIN_CFG(211, GPIO)
+#define GPIO211_LCD_VSI0 PIN_CFG(211, ALT_A)
+#define GPIO211_PRCMU_PWRCTRL2 PIN_CFG(211, ALT_B)
+
+#define GPIO212_GPIO PIN_CFG(212, GPIO)
+#define GPIO212_SPI2_CS2n PIN_CFG(212, ALT_A)
+#define GPIO212_LCD_HSO PIN_CFG(212, ALT_B)
+
+#define GPIO213_GPIO PIN_CFG(213, GPIO)
+#define GPIO213_SPI2_CS1n PIN_CFG(213, ALT_A)
+#define GPIO213_LCD_DE PIN_CFG(213, ALT_B)
+#define GPIO213_BUSMON_D16 PIN_CFG(213, ALT_C)
+
+#define GPIO214_GPIO PIN_CFG(214, GPIO)
+#define GPIO214_SPI2_CS0n PIN_CFG(214, ALT_A)
+#define GPIO214_LCD_D7 PIN_CFG(214, ALT_B)
+#define GPIO214_BUSMON_D17 PIN_CFG(214, ALT_C)
+
+#define GPIO215_GPIO PIN_CFG(215, GPIO)
+#define GPIO215_SPI2_RXD PIN_CFG(215, ALT_A)
+#define GPIO215_LCD_D6 PIN_CFG(215, ALT_B)
+#define GPIO215_BUSMON_D18 PIN_CFG(215, ALT_C)
+
+#define GPIO216_GPIO PIN_CFG(216, GPIO)
+#define GPIO216_SPI2_CLK PIN_CFG(216, ALT_A)
+#define GPIO216_LCD_D5 PIN_CFG(216, ALT_B)
+
+#define GPIO217_GPIO PIN_CFG(217, GPIO)
+#define GPIO217_SPI2_TXD PIN_CFG(217, ALT_A)
+#define GPIO217_LCD_D4 PIN_CFG(217, ALT_B)
+#define GPIO217_BUSMON_D19 PIN_CFG(217, ALT_C)
+
+#define GPIO218_GPIO PIN_CFG(218, GPIO)
+#define GPIO218_I2C2_SCL PIN_CFG(218, ALT_A)
+#define GPIO218_LCD_VSO PIN_CFG(218, ALT_B)
+
+#define GPIO219_GPIO PIN_CFG(219, GPIO)
+#define GPIO219_I2C2_SDA PIN_CFG(219, ALT_A)
+#define GPIO219_LCD_D3 PIN_CFG(219, ALT_B)
+
+#define GPIO220_GPIO PIN_CFG(220, GPIO)
+#define GPIO220_MSP2_TCK PIN_CFG(220, ALT_A)
+#define GPIO220_LCD_D2 PIN_CFG(220, ALT_B)
+
+#define GPIO221_GPIO PIN_CFG(221, GPIO)
+#define GPIO221_MSP2_TFS PIN_CFG(221, ALT_A)
+#define GPIO221_LCD_D1 PIN_CFG(221, ALT_B)
+
+#define GPIO222_GPIO PIN_CFG(222, GPIO)
+#define GPIO222_MSP2_TXD PIN_CFG(222, ALT_A)
+#define GPIO222_LCD_D0 PIN_CFG(222, ALT_B)
+
+#define GPIO223_GPIO PIN_CFG(223, GPIO)
+#define GPIO223_MSP2_RXD PIN_CFG(223, ALT_A)
+#define GPIO223_LCD_CLK PIN_CFG(223, ALT_B)
+
+#define GPIO224_GPIO PIN_CFG(224, GPIO)
+#define GPIO224_PRCMU_PWRCTRL0 PIN_CFG(224, ALT_A)
+#define GPIO224_LCD_VSI1 PIN_CFG(224, ALT_B)
+
+#define GPIO225_GPIO PIN_CFG(225, GPIO)
+#define GPIO225_PRCMU_PWRCTRL1 PIN_CFG(225, ALT_A)
+#define GPIO225_IRDA_RXD PIN_CFG(225, ALT_B)
+
+#define GPIO226_GPIO PIN_CFG(226, GPIO)
+#define GPIO226_PRCMU_PWRCTRL2 PIN_CFG(226, ALT_A)
+#define GPIO226_IRRC_DAT PIN_CFG(226, ALT_B)
+
+#define GPIO227_GPIO PIN_CFG(227, GPIO)
+#define GPIO227_IRRC_DAT PIN_CFG(227, ALT_A)
+#define GPIO227_IRDA_TXD PIN_CFG(227, ALT_B)
+
+#endif
diff --git a/arch/arm/mach-ux500/pins-db8500.h b/arch/arm/mach-ux500/pins-db8500.h
index 9055d5d..66f8761 100644
--- a/arch/arm/mach-ux500/pins-db8500.h
+++ b/arch/arm/mach-ux500/pins-db8500.h
@@ -96,57 +96,57 @@
#define GPIO17_SLIM0_CLK PIN_CFG(17, ALT_C)
#define GPIO18_GPIO PIN_CFG(18, GPIO)
-#define GPIO18_MC0_CMDDIR PIN_CFG(18, ALT_A)
+#define GPIO18_MC0_CMDDIR PIN_CFG_PULL(18, ALT_A, UP)
#define GPIO18_U2_RXD PIN_CFG(18, ALT_B)
#define GPIO18_MS_IEP PIN_CFG(18, ALT_C)
#define GPIO19_GPIO PIN_CFG(19, GPIO)
-#define GPIO19_MC0_DAT0DIR PIN_CFG(19, ALT_A)
+#define GPIO19_MC0_DAT0DIR PIN_CFG_PULL(19, ALT_A, UP)
#define GPIO19_U2_TXD PIN_CFG(19, ALT_B)
#define GPIO19_MS_DAT0DIR PIN_CFG(19, ALT_C)
#define GPIO20_GPIO PIN_CFG(20, GPIO)
-#define GPIO20_MC0_DAT2DIR PIN_CFG(20, ALT_A)
+#define GPIO20_MC0_DAT2DIR PIN_CFG_PULL(20, ALT_A, UP)
#define GPIO20_UARTMOD_TXD PIN_CFG(20, ALT_B)
#define GPIO20_IP_TRIGOUT PIN_CFG(20, ALT_C)
#define GPIO21_GPIO PIN_CFG(21, GPIO)
-#define GPIO21_MC0_DAT31DIR PIN_CFG(21, ALT_A)
+#define GPIO21_MC0_DAT31DIR PIN_CFG_PULL(21, ALT_A, UP)
#define GPIO21_MSP0_SCK PIN_CFG(21, ALT_B)
#define GPIO21_MS_DAT31DIR PIN_CFG(21, ALT_C)
#define GPIO22_GPIO PIN_CFG(22, GPIO)
-#define GPIO22_MC0_FBCLK PIN_CFG(22, ALT_A)
+#define GPIO22_MC0_FBCLK PIN_CFG_PULL(22, ALT_A, UP)
#define GPIO22_UARTMOD_RXD PIN_CFG(22, ALT_B)
#define GPIO22_MS_FBCLK PIN_CFG(22, ALT_C)
#define GPIO23_GPIO PIN_CFG(23, GPIO)
-#define GPIO23_MC0_CLK PIN_CFG(23, ALT_A)
+#define GPIO23_MC0_CLK PIN_CFG_PULL(23, ALT_A, UP)
#define GPIO23_STMMOD_CLK PIN_CFG(23, ALT_B)
#define GPIO23_MS_CLK PIN_CFG(23, ALT_C)
#define GPIO24_GPIO PIN_CFG(24, GPIO)
-#define GPIO24_MC0_CMD PIN_CFG(24, ALT_A)
+#define GPIO24_MC0_CMD PIN_CFG_PULL(24, ALT_A, UP)
#define GPIO24_UARTMOD_RXD PIN_CFG(24, ALT_B)
#define GPIO24_MS_BS PIN_CFG(24, ALT_C)
#define GPIO25_GPIO PIN_CFG(25, GPIO)
-#define GPIO25_MC0_DAT0 PIN_CFG(25, ALT_A)
+#define GPIO25_MC0_DAT0 PIN_CFG_PULL(25, ALT_A, UP)
#define GPIO25_STMMOD_DAT0 PIN_CFG(25, ALT_B)
#define GPIO25_MS_DAT0 PIN_CFG(25, ALT_C)
#define GPIO26_GPIO PIN_CFG(26, GPIO)
-#define GPIO26_MC0_DAT1 PIN_CFG(26, ALT_A)
+#define GPIO26_MC0_DAT1 PIN_CFG_PULL(26, ALT_A, UP)
#define GPIO26_STMMOD_DAT1 PIN_CFG(26, ALT_B)
#define GPIO26_MS_DAT1 PIN_CFG(26, ALT_C)
#define GPIO27_GPIO PIN_CFG(27, GPIO)
-#define GPIO27_MC0_DAT2 PIN_CFG(27, ALT_A)
+#define GPIO27_MC0_DAT2 PIN_CFG_PULL(27, ALT_A, UP)
#define GPIO27_STMMOD_DAT2 PIN_CFG(27, ALT_B)
#define GPIO27_MS_DAT2 PIN_CFG(27, ALT_C)
#define GPIO28_GPIO PIN_CFG(28, GPIO)
-#define GPIO28_MC0_DAT3 PIN_CFG(28, ALT_A)
+#define GPIO28_MC0_DAT3 PIN_CFG_PULL(28, ALT_A, UP)
#define GPIO28_STMMOD_DAT3 PIN_CFG(28, ALT_B)
#define GPIO28_MS_DAT3 PIN_CFG(28, ALT_C)
@@ -357,48 +357,48 @@
#define GPIO97_MC5_DAT7 PIN_CFG(97, ALT_C)
#define GPIO128_GPIO PIN_CFG(128, GPIO)
-#define GPIO128_MC2_CLK PIN_CFG(128, ALT_A)
+#define GPIO128_MC2_CLK PIN_CFG_PULL(128, ALT_A, UP)
#define GPIO128_SM_CKO PIN_CFG(128, ALT_B)
#define GPIO129_GPIO PIN_CFG(129, GPIO)
-#define GPIO129_MC2_CMD PIN_CFG(129, ALT_A)
+#define GPIO129_MC2_CMD PIN_CFG_PULL(129, ALT_A, UP)
#define GPIO129_SM_WAIT0n PIN_CFG(129, ALT_B)
#define GPIO130_GPIO PIN_CFG(130, GPIO)
-#define GPIO130_MC2_FBCLK PIN_CFG(130, ALT_A)
+#define GPIO130_MC2_FBCLK PIN_CFG_PULL(130, ALT_A, UP)
#define GPIO130_SM_FBCLK PIN_CFG(130, ALT_B)
#define GPIO130_MC2_RSTN PIN_CFG(130, ALT_C)
#define GPIO131_GPIO PIN_CFG(131, GPIO)
-#define GPIO131_MC2_DAT0 PIN_CFG(131, ALT_A)
+#define GPIO131_MC2_DAT0 PIN_CFG_PULL(131, ALT_A, UP)
#define GPIO131_SM_ADQ8 PIN_CFG(131, ALT_B)
#define GPIO132_GPIO PIN_CFG(132, GPIO)
-#define GPIO132_MC2_DAT1 PIN_CFG(132, ALT_A)
+#define GPIO132_MC2_DAT1 PIN_CFG_PULL(132, ALT_A, UP)
#define GPIO132_SM_ADQ9 PIN_CFG(132, ALT_B)
#define GPIO133_GPIO PIN_CFG(133, GPIO)
-#define GPIO133_MC2_DAT2 PIN_CFG(133, ALT_A)
+#define GPIO133_MC2_DAT2 PIN_CFG_PULL(133, ALT_A, UP)
#define GPIO133_SM_ADQ10 PIN_CFG(133, ALT_B)
#define GPIO134_GPIO PIN_CFG(134, GPIO)
-#define GPIO134_MC2_DAT3 PIN_CFG(134, ALT_A)
+#define GPIO134_MC2_DAT3 PIN_CFG_PULL(134, ALT_A, UP)
#define GPIO134_SM_ADQ11 PIN_CFG(134, ALT_B)
#define GPIO135_GPIO PIN_CFG(135, GPIO)
-#define GPIO135_MC2_DAT4 PIN_CFG(135, ALT_A)
+#define GPIO135_MC2_DAT4 PIN_CFG_PULL(135, ALT_A, UP)
#define GPIO135_SM_ADQ12 PIN_CFG(135, ALT_B)
#define GPIO136_GPIO PIN_CFG(136, GPIO)
-#define GPIO136_MC2_DAT5 PIN_CFG(136, ALT_A)
+#define GPIO136_MC2_DAT5 PIN_CFG_PULL(136, ALT_A, UP)
#define GPIO136_SM_ADQ13 PIN_CFG(136, ALT_B)
#define GPIO137_GPIO PIN_CFG(137, GPIO)
-#define GPIO137_MC2_DAT6 PIN_CFG(137, ALT_A)
+#define GPIO137_MC2_DAT6 PIN_CFG_PULL(137, ALT_A, UP)
#define GPIO137_SM_ADQ14 PIN_CFG(137, ALT_B)
#define GPIO138_GPIO PIN_CFG(138, GPIO)
-#define GPIO138_MC2_DAT7 PIN_CFG(138, ALT_A)
+#define GPIO138_MC2_DAT7 PIN_CFG_PULL(138, ALT_A, UP)
#define GPIO138_SM_ADQ15 PIN_CFG(138, ALT_B)
#define GPIO139_GPIO PIN_CFG(139, GPIO)
@@ -569,39 +569,39 @@
#define GPIO196_MSP2_RXD PIN_CFG(196, ALT_A)
#define GPIO197_GPIO PIN_CFG(197, GPIO)
-#define GPIO197_MC4_DAT3 PIN_CFG(197, ALT_A)
+#define GPIO197_MC4_DAT3 PIN_CFG_PULL(197, ALT_A, UP)
#define GPIO198_GPIO PIN_CFG(198, GPIO)
-#define GPIO198_MC4_DAT2 PIN_CFG(198, ALT_A)
+#define GPIO198_MC4_DAT2 PIN_CFG_PULL(198, ALT_A, UP)
#define GPIO199_GPIO PIN_CFG(199, GPIO)
-#define GPIO199_MC4_DAT1 PIN_CFG(199, ALT_A)
+#define GPIO199_MC4_DAT1 PIN_CFG_PULL(199, ALT_A, UP)
#define GPIO200_GPIO PIN_CFG(200, GPIO)
-#define GPIO200_MC4_DAT0 PIN_CFG(200, ALT_A)
+#define GPIO200_MC4_DAT0 PIN_CFG_PULL(200, ALT_A, UP)
#define GPIO201_GPIO PIN_CFG(201, GPIO)
-#define GPIO201_MC4_CMD PIN_CFG(201, ALT_A)
+#define GPIO201_MC4_CMD PIN_CFG_PULL(201, ALT_A, UP)
#define GPIO202_GPIO PIN_CFG(202, GPIO)
-#define GPIO202_MC4_FBCLK PIN_CFG(202, ALT_A)
+#define GPIO202_MC4_FBCLK PIN_CFG_PULL(202, ALT_A, UP)
#define GPIO202_PWL PIN_CFG(202, ALT_B)
#define GPIO202_MC4_RSTN PIN_CFG(202, ALT_C)
#define GPIO203_GPIO PIN_CFG(203, GPIO)
-#define GPIO203_MC4_CLK PIN_CFG(203, ALT_A)
+#define GPIO203_MC4_CLK PIN_CFG_PULL(203, ALT_A, UP)
#define GPIO204_GPIO PIN_CFG(204, GPIO)
-#define GPIO204_MC4_DAT7 PIN_CFG(204, ALT_A)
+#define GPIO204_MC4_DAT7 PIN_CFG_PULL(204, ALT_A, UP)
#define GPIO205_GPIO PIN_CFG(205, GPIO)
-#define GPIO205_MC4_DAT6 PIN_CFG(205, ALT_A)
+#define GPIO205_MC4_DAT6 PIN_CFG_PULL(205, ALT_A, UP)
#define GPIO206_GPIO PIN_CFG(206, GPIO)
-#define GPIO206_MC4_DAT5 PIN_CFG(206, ALT_A)
+#define GPIO206_MC4_DAT5 PIN_CFG_PULL(206, ALT_A, UP)
#define GPIO207_GPIO PIN_CFG(207, GPIO)
-#define GPIO207_MC4_DAT4 PIN_CFG(207, ALT_A)
+#define GPIO207_MC4_DAT4 PIN_CFG_PULL(207, ALT_A, UP)
#define GPIO208_GPIO PIN_CFG(208, GPIO)
#define GPIO208_MC1_CLK PIN_CFG(208, ALT_A)
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 438ef16..9e4c678 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -78,6 +78,8 @@
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1);
+ smp_cross_call(cpumask_of(cpu));
+
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
if (pen_release == -1)
diff --git a/arch/arm/mach-ux500/prcmu.c b/arch/arm/mach-ux500/prcmu.c
new file mode 100644
index 0000000..293274d
--- /dev/null
+++ b/arch/arm/mach-ux500/prcmu.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) ST Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
+ *
+ * U8500 PRCMU driver.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/jiffies.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+
+#include <mach/hardware.h>
+#include <mach/prcmu-regs.h>
+
+#define PRCMU_TCDM_BASE __io_address(U8500_PRCMU_TCDM_BASE)
+
+#define REQ_MB5 (PRCMU_TCDM_BASE + 0xE44)
+#define ACK_MB5 (PRCMU_TCDM_BASE + 0xDF4)
+
+#define REQ_MB5_I2C_SLAVE_OP (REQ_MB5)
+#define REQ_MB5_I2C_HW_BITS (REQ_MB5 + 1)
+#define REQ_MB5_I2C_REG (REQ_MB5 + 2)
+#define REQ_MB5_I2C_VAL (REQ_MB5 + 3)
+
+#define ACK_MB5_I2C_STATUS (ACK_MB5 + 1)
+#define ACK_MB5_I2C_VAL (ACK_MB5 + 3)
+
+#define I2C_WRITE(slave) ((slave) << 1)
+#define I2C_READ(slave) (((slave) << 1) | BIT(0))
+#define I2C_STOP_EN BIT(3)
+
+enum ack_mb5_status {
+ I2C_WR_OK = 0x01,
+ I2C_RD_OK = 0x02,
+};
+
+#define MBOX_BIT BIT
+#define NUM_MBOX 8
+
+static struct {
+ struct mutex lock;
+ struct completion work;
+ bool failed;
+ struct {
+ u8 status;
+ u8 value;
+ } ack;
+} mb5_transfer;
+
+/**
+ * prcmu_abb_read() - Read register value(s) from the ABB.
+ * @slave: The I2C slave address.
+ * @reg: The (start) register address.
+ * @value: The read out value(s).
+ * @size: The number of registers to read.
+ *
+ * Reads register value(s) from the ABB.
+ * @size has to be 1 for the current firmware version.
+ */
+int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
+{
+ int r;
+
+ if (size != 1)
+ return -EINVAL;
+
+ r = mutex_lock_interruptible(&mb5_transfer.lock);
+ if (r)
+ return r;
+
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ cpu_relax();
+
+ writeb(I2C_READ(slave), REQ_MB5_I2C_SLAVE_OP);
+ writeb(I2C_STOP_EN, REQ_MB5_I2C_HW_BITS);
+ writeb(reg, REQ_MB5_I2C_REG);
+
+ writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
+ if (!wait_for_completion_timeout(&mb5_transfer.work,
+ msecs_to_jiffies(500))) {
+ pr_err("prcmu: prcmu_abb_read timed out.\n");
+ r = -EIO;
+ goto unlock_and_return;
+ }
+ r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
+ if (!r)
+ *value = mb5_transfer.ack.value;
+
+unlock_and_return:
+ mutex_unlock(&mb5_transfer.lock);
+ return r;
+}
+EXPORT_SYMBOL(prcmu_abb_read);
+
+/**
+ * prcmu_abb_write() - Write register value(s) to the ABB.
+ * @slave: The I2C slave address.
+ * @reg: The (start) register address.
+ * @value: The value(s) to write.
+ * @size: The number of registers to write.
+ *
+ * Writes register value(s) to the ABB.
+ * @size has to be 1 for the current firmware version.
+ */
+int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
+{
+ int r;
+
+ if (size != 1)
+ return -EINVAL;
+
+ r = mutex_lock_interruptible(&mb5_transfer.lock);
+ if (r)
+ return r;
+
+
+ while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ cpu_relax();
+
+ writeb(I2C_WRITE(slave), REQ_MB5_I2C_SLAVE_OP);
+ writeb(I2C_STOP_EN, REQ_MB5_I2C_HW_BITS);
+ writeb(reg, REQ_MB5_I2C_REG);
+ writeb(*value, REQ_MB5_I2C_VAL);
+
+ writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
+ if (!wait_for_completion_timeout(&mb5_transfer.work,
+ msecs_to_jiffies(500))) {
+ pr_err("prcmu: prcmu_abb_write timed out.\n");
+ r = -EIO;
+ goto unlock_and_return;
+ }
+ r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
+
+unlock_and_return:
+ mutex_unlock(&mb5_transfer.lock);
+ return r;
+}
+EXPORT_SYMBOL(prcmu_abb_write);
+
+static void read_mailbox_0(void)
+{
+ writel(MBOX_BIT(0), PRCM_ARM_IT1_CLEAR);
+}
+
+static void read_mailbox_1(void)
+{
+ writel(MBOX_BIT(1), PRCM_ARM_IT1_CLEAR);
+}
+
+static void read_mailbox_2(void)
+{
+ writel(MBOX_BIT(2), PRCM_ARM_IT1_CLEAR);
+}
+
+static void read_mailbox_3(void)
+{
+ writel(MBOX_BIT(3), PRCM_ARM_IT1_CLEAR);
+}
+
+static void read_mailbox_4(void)
+{
+ writel(MBOX_BIT(4), PRCM_ARM_IT1_CLEAR);
+}
+
+static void read_mailbox_5(void)
+{
+ mb5_transfer.ack.status = readb(ACK_MB5_I2C_STATUS);
+ mb5_transfer.ack.value = readb(ACK_MB5_I2C_VAL);
+ complete(&mb5_transfer.work);
+ writel(MBOX_BIT(5), PRCM_ARM_IT1_CLEAR);
+}
+
+static void read_mailbox_6(void)
+{
+ writel(MBOX_BIT(6), PRCM_ARM_IT1_CLEAR);
+}
+
+static void read_mailbox_7(void)
+{
+ writel(MBOX_BIT(7), PRCM_ARM_IT1_CLEAR);
+}
+
+static void (* const read_mailbox[NUM_MBOX])(void) = {
+ read_mailbox_0,
+ read_mailbox_1,
+ read_mailbox_2,
+ read_mailbox_3,
+ read_mailbox_4,
+ read_mailbox_5,
+ read_mailbox_6,
+ read_mailbox_7
+};
+
+static irqreturn_t prcmu_irq_handler(int irq, void *data)
+{
+ u32 bits;
+ u8 n;
+
+ bits = (readl(PRCM_ARM_IT1_VAL) & (MBOX_BIT(NUM_MBOX) - 1));
+ if (unlikely(!bits))
+ return IRQ_NONE;
+
+ for (n = 0; bits; n++) {
+ if (bits & MBOX_BIT(n)) {
+ bits -= MBOX_BIT(n);
+ read_mailbox[n]();
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int __init prcmu_init(void)
+{
+ mutex_init(&mb5_transfer.lock);
+ init_completion(&mb5_transfer.work);
+
+ /* Clean up the mailbox interrupts after pre-kernel code. */
+ writel((MBOX_BIT(NUM_MBOX) - 1), PRCM_ARM_IT1_CLEAR);
+
+ return request_irq(IRQ_PRCMU, prcmu_irq_handler, 0, "prcmu", NULL);
+}
+
+arch_initcall(prcmu_init);
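
Editor's note: a minimal caller sketch for the two exported ABB accessors above, assuming some other ux500 driver wants to read-modify-write a single ABB register. The slave address 0x05, register offset 0x10 and the function name are illustrative assumptions, not taken from the patch; the prototypes are declared locally only to keep the sketch self-contained (in-tree callers would pick them up from the matching mach header). Note the firmware restriction that size must be 1.

	#include <linux/types.h>

	int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
	int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);

	/* Illustration: set bit 0 of a hypothetical ABB register. */
	static int example_abb_set_bit0(void)
	{
		u8 val;
		int ret;

		ret = prcmu_abb_read(0x05, 0x10, &val, 1);	/* size must be 1 */
		if (ret)
			return ret;

		val |= 0x01;
		return prcmu_abb_write(0x05, 0x10, &val, 1);
	}
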
diff --git a/arch/arm/mach-ux500/ste-dma40-db5500.h b/arch/arm/mach-ux500/ste-dma40-db5500.h
new file mode 100644
index 0000000..cb2110c
--- /dev/null
+++ b/arch/arm/mach-ux500/ste-dma40-db5500.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * DB5500-SoC-specific configuration for DMA40
+ */
+
+#ifndef STE_DMA40_DB5500_H
+#define STE_DMA40_DB5500_H
+
+#define DB5500_DMA_NR_DEV 64
+
+enum dma_src_dev_type {
+ DB5500_DMA_DEV0_SPI0_RX = 0,
+ DB5500_DMA_DEV1_SPI1_RX = 1,
+ DB5500_DMA_DEV2_SPI2_RX = 2,
+ DB5500_DMA_DEV3_SPI3_RX = 3,
+ DB5500_DMA_DEV4_USB_OTG_IEP_1_9 = 4,
+ DB5500_DMA_DEV5_USB_OTG_IEP_2_10 = 5,
+ DB5500_DMA_DEV6_USB_OTG_IEP_3_11 = 6,
+ DB5500_DMA_DEV7_IRDA_RFS = 7,
+ DB5500_DMA_DEV8_IRDA_FIFO_RX = 8,
+ DB5500_DMA_DEV9_MSP0_RX = 9,
+ DB5500_DMA_DEV10_MSP1_RX = 10,
+ DB5500_DMA_DEV11_MSP2_RX = 11,
+ DB5500_DMA_DEV12_UART0_RX = 12,
+ DB5500_DMA_DEV13_UART1_RX = 13,
+ DB5500_DMA_DEV14_UART2_RX = 14,
+ DB5500_DMA_DEV15_UART3_RX = 15,
+ DB5500_DMA_DEV16_USB_OTG_IEP_8 = 16,
+ DB5500_DMA_DEV17_USB_OTG_IEP_1_9 = 17,
+ DB5500_DMA_DEV18_USB_OTG_IEP_2_10 = 18,
+ DB5500_DMA_DEV19_USB_OTG_IEP_3_11 = 19,
+ DB5500_DMA_DEV20_USB_OTG_IEP_4_12 = 20,
+ DB5500_DMA_DEV21_USB_OTG_IEP_5_13 = 21,
+ DB5500_DMA_DEV22_USB_OTG_IEP_6_14 = 22,
+ DB5500_DMA_DEV23_USB_OTG_IEP_7_15 = 23,
+ DB5500_DMA_DEV24_SDMMC0_RX = 24,
+ DB5500_DMA_DEV25_SDMMC1_RX = 25,
+ DB5500_DMA_DEV26_SDMMC2_RX = 26,
+ DB5500_DMA_DEV27_SDMMC3_RX = 27,
+ DB5500_DMA_DEV28_SDMMC4_RX = 28,
+ /* 29 - 32 not used */
+ DB5500_DMA_DEV33_SDMMC0_RX = 33,
+ DB5500_DMA_DEV34_SDMMC1_RX = 34,
+ DB5500_DMA_DEV35_SDMMC2_RX = 35,
+ DB5500_DMA_DEV36_SDMMC3_RX = 36,
+ DB5500_DMA_DEV37_SDMMC4_RX = 37,
+ DB5500_DMA_DEV38_USB_OTG_IEP_8 = 38,
+ DB5500_DMA_DEV39_USB_OTG_IEP_1_9 = 39,
+ DB5500_DMA_DEV40_USB_OTG_IEP_2_10 = 40,
+ DB5500_DMA_DEV41_USB_OTG_IEP_3_11 = 41,
+ DB5500_DMA_DEV42_USB_OTG_IEP_4_12 = 42,
+ DB5500_DMA_DEV43_USB_OTG_IEP_5_13 = 43,
+ DB5500_DMA_DEV44_USB_OTG_IEP_6_14 = 44,
+ DB5500_DMA_DEV45_USB_OTG_IEP_7_15 = 45,
+ /* 46 not used */
+ DB5500_DMA_DEV47_MCDE_RX = 47,
+ DB5500_DMA_DEV48_CRYPTO1_RX = 48,
+ /* 49, 50 not used */
+ DB5500_DMA_DEV49_I2C1_RX = 51,
+ DB5500_DMA_DEV50_I2C3_RX = 52,
+ DB5500_DMA_DEV51_I2C2_RX = 53,
+ /* 54 - 60 not used */
+ DB5500_DMA_DEV61_CRYPTO0_RX = 61,
+ /* 62, 63 not used */
+};
+
+enum dma_dest_dev_type {
+ DB5500_DMA_DEV0_SPI0_TX = 0,
+ DB5500_DMA_DEV1_SPI1_TX = 1,
+ DB5500_DMA_DEV2_SPI2_TX = 2,
+ DB5500_DMA_DEV3_SPI3_TX = 3,
+ DB5500_DMA_DEV4_USB_OTG_OEP_1_9 = 4,
+ DB5500_DMA_DEV5_USB_OTG_OEP_2_10 = 5,
+ DB5500_DMA_DEV6_USB_OTG_OEP_3_11 = 6,
+ DB5500_DMA_DEV7_IRRC_TX = 7,
+ DB5500_DMA_DEV8_IRDA_FIFO_TX = 8,
+ DB5500_DMA_DEV9_MSP0_TX = 9,
+ DB5500_DMA_DEV10_MSP1_TX = 10,
+ DB5500_DMA_DEV11_MSP2_TX = 11,
+ DB5500_DMA_DEV12_UART0_TX = 12,
+ DB5500_DMA_DEV13_UART1_TX = 13,
+ DB5500_DMA_DEV14_UART2_TX = 14,
+ DB5500_DMA_DEV15_UART3_TX = 15,
+ DB5500_DMA_DEV16_USB_OTG_OEP_8 = 16,
+ DB5500_DMA_DEV17_USB_OTG_OEP_1_9 = 17,
+ DB5500_DMA_DEV18_USB_OTG_OEP_2_10 = 18,
+ DB5500_DMA_DEV19_USB_OTG_OEP_3_11 = 19,
+ DB5500_DMA_DEV20_USB_OTG_OEP_4_12 = 20,
+ DB5500_DMA_DEV21_USB_OTG_OEP_5_13 = 21,
+ DB5500_DMA_DEV22_USB_OTG_OEP_6_14 = 22,
+ DB5500_DMA_DEV23_USB_OTG_OEP_7_15 = 23,
+ DB5500_DMA_DEV24_SDMMC0_TX = 24,
+ DB5500_DMA_DEV25_SDMMC1_TX = 25,
+ DB5500_DMA_DEV26_SDMMC2_TX = 26,
+ DB5500_DMA_DEV27_SDMMC3_TX = 27,
+ DB5500_DMA_DEV28_SDMMC4_TX = 28,
+ /* 29 - 31 not used */
+ DB5500_DMA_DEV32_FSMC_TX = 32,
+ DB5500_DMA_DEV33_SDMMC0_TX = 33,
+ DB5500_DMA_DEV34_SDMMC1_TX = 34,
+ DB5500_DMA_DEV35_SDMMC2_TX = 35,
+ DB5500_DMA_DEV36_SDMMC3_TX = 36,
+ DB5500_DMA_DEV37_SDMMC4_TX = 37,
+ DB5500_DMA_DEV38_USB_OTG_OEP_8 = 38,
+ DB5500_DMA_DEV39_USB_OTG_OEP_1_9 = 39,
+ DB5500_DMA_DEV40_USB_OTG_OEP_2_10 = 40,
+ DB5500_DMA_DEV41_USB_OTG_OEP_3_11 = 41,
+ DB5500_DMA_DEV42_USB_OTG_OEP_4_12 = 42,
+ DB5500_DMA_DEV43_USB_OTG_OEP_5_13 = 43,
+ DB5500_DMA_DEV44_USB_OTG_OEP_6_14 = 44,
+ DB5500_DMA_DEV45_USB_OTG_OEP_7_15 = 45,
+ /* 46 not used */
+ DB5500_DMA_DEV47_STM_TX = 47,
+ DB5500_DMA_DEV48_CRYPTO1_TX = 48,
+ DB5500_DMA_DEV49_CRYPTO1_TX_HASH1_TX = 49,
+ DB5500_DMA_DEV50_HASH1_TX = 50,
+ DB5500_DMA_DEV51_I2C1_TX = 51,
+ DB5500_DMA_DEV52_I2C3_TX = 52,
+ DB5500_DMA_DEV53_I2C2_TX = 53,
+ /* 54, 55 not used */
+ DB5500_DMA_MEMCPY_TX_1 = 56,
+ DB5500_DMA_MEMCPY_TX_2 = 57,
+ DB5500_DMA_MEMCPY_TX_3 = 58,
+ DB5500_DMA_MEMCPY_TX_4 = 59,
+ DB5500_DMA_MEMCPY_TX_5 = 60,
+ DB5500_DMA_DEV61_CRYPTO0_TX = 61,
+ DB5500_DMA_DEV62_CRYPTO0_TX_HASH0_TX = 62,
+ DB5500_DMA_DEV63_HASH0_TX = 63,
+};
+
+#endif
diff --git a/arch/arm/mach-ux500/ste-dma40-db8500.h b/arch/arm/mach-ux500/ste-dma40-db8500.h
index 9d9d379..a616419 100644
--- a/arch/arm/mach-ux500/ste-dma40-db8500.h
+++ b/arch/arm/mach-ux500/ste-dma40-db8500.h
@@ -10,145 +10,135 @@
#ifndef STE_DMA40_DB8500_H
#define STE_DMA40_DB8500_H
-#define STEDMA40_NR_DEV 64
+#define DB8500_DMA_NR_DEV 64
enum dma_src_dev_type {
- STEDMA40_DEV_SPI0_RX = 0,
- STEDMA40_DEV_SD_MMC0_RX = 1,
- STEDMA40_DEV_SD_MMC1_RX = 2,
- STEDMA40_DEV_SD_MMC2_RX = 3,
- STEDMA40_DEV_I2C1_RX = 4,
- STEDMA40_DEV_I2C3_RX = 5,
- STEDMA40_DEV_I2C2_RX = 6,
- STEDMA40_DEV_I2C4_RX = 7, /* Only on V1 */
- STEDMA40_DEV_SSP0_RX = 8,
- STEDMA40_DEV_SSP1_RX = 9,
- STEDMA40_DEV_MCDE_RX = 10,
- STEDMA40_DEV_UART2_RX = 11,
- STEDMA40_DEV_UART1_RX = 12,
- STEDMA40_DEV_UART0_RX = 13,
- STEDMA40_DEV_MSP2_RX = 14,
- STEDMA40_DEV_I2C0_RX = 15,
- STEDMA40_DEV_USB_OTG_IEP_8 = 16,
- STEDMA40_DEV_USB_OTG_IEP_1_9 = 17,
- STEDMA40_DEV_USB_OTG_IEP_2_10 = 18,
- STEDMA40_DEV_USB_OTG_IEP_3_11 = 19,
- STEDMA40_DEV_SLIM0_CH0_RX_HSI_RX_CH0 = 20,
- STEDMA40_DEV_SLIM0_CH1_RX_HSI_RX_CH1 = 21,
- STEDMA40_DEV_SLIM0_CH2_RX_HSI_RX_CH2 = 22,
- STEDMA40_DEV_SLIM0_CH3_RX_HSI_RX_CH3 = 23,
- STEDMA40_DEV_SRC_SXA0_RX_TX = 24,
- STEDMA40_DEV_SRC_SXA1_RX_TX = 25,
- STEDMA40_DEV_SRC_SXA2_RX_TX = 26,
- STEDMA40_DEV_SRC_SXA3_RX_TX = 27,
- STEDMA40_DEV_SD_MM2_RX = 28,
- STEDMA40_DEV_SD_MM0_RX = 29,
- STEDMA40_DEV_MSP1_RX = 30,
- /*
- * This channel is either SlimBus or MSP,
- * never both at the same time.
- */
- STEDMA40_SLIM0_CH0_RX = 31,
- STEDMA40_DEV_MSP0_RX = 31,
- STEDMA40_DEV_SD_MM1_RX = 32,
- STEDMA40_DEV_SPI2_RX = 33,
- STEDMA40_DEV_I2C3_RX2 = 34,
- STEDMA40_DEV_SPI1_RX = 35,
- STEDMA40_DEV_USB_OTG_IEP_4_12 = 36,
- STEDMA40_DEV_USB_OTG_IEP_5_13 = 37,
- STEDMA40_DEV_USB_OTG_IEP_6_14 = 38,
- STEDMA40_DEV_USB_OTG_IEP_7_15 = 39,
- STEDMA40_DEV_SPI3_RX = 40,
- STEDMA40_DEV_SD_MM3_RX = 41,
- STEDMA40_DEV_SD_MM4_RX = 42,
- STEDMA40_DEV_SD_MM5_RX = 43,
- STEDMA40_DEV_SRC_SXA4_RX_TX = 44,
- STEDMA40_DEV_SRC_SXA5_RX_TX = 45,
- STEDMA40_DEV_SRC_SXA6_RX_TX = 46,
- STEDMA40_DEV_SRC_SXA7_RX_TX = 47,
- STEDMA40_DEV_CAC1_RX = 48,
- /* RX channels 49 and 50 are unused */
- STEDMA40_DEV_MSHC_RX = 51,
- STEDMA40_DEV_SLIM1_CH0_RX_HSI_RX_CH4 = 52,
- STEDMA40_DEV_SLIM1_CH1_RX_HSI_RX_CH5 = 53,
- STEDMA40_DEV_SLIM1_CH2_RX_HSI_RX_CH6 = 54,
- STEDMA40_DEV_SLIM1_CH3_RX_HSI_RX_CH7 = 55,
- /* RX channels 56 thru 60 are unused */
- STEDMA40_DEV_CAC0_RX = 61,
- /* RX channels 62 and 63 are unused */
+ DB8500_DMA_DEV0_SPI0_RX = 0,
+ DB8500_DMA_DEV1_SD_MMC0_RX = 1,
+ DB8500_DMA_DEV2_SD_MMC1_RX = 2,
+ DB8500_DMA_DEV3_SD_MMC2_RX = 3,
+ DB8500_DMA_DEV4_I2C1_RX = 4,
+ DB8500_DMA_DEV5_I2C3_RX = 5,
+ DB8500_DMA_DEV6_I2C2_RX = 6,
+ DB8500_DMA_DEV7_I2C4_RX = 7, /* Only on V1 and later */
+ DB8500_DMA_DEV8_SSP0_RX = 8,
+ DB8500_DMA_DEV9_SSP1_RX = 9,
+ DB8500_DMA_DEV10_MCDE_RX = 10,
+ DB8500_DMA_DEV11_UART2_RX = 11,
+ DB8500_DMA_DEV12_UART1_RX = 12,
+ DB8500_DMA_DEV13_UART0_RX = 13,
+ DB8500_DMA_DEV14_MSP2_RX = 14,
+ DB8500_DMA_DEV15_I2C0_RX = 15,
+ DB8500_DMA_DEV16_USB_OTG_IEP_7_15 = 16,
+ DB8500_DMA_DEV17_USB_OTG_IEP_6_14 = 17,
+ DB8500_DMA_DEV18_USB_OTG_IEP_5_13 = 18,
+ DB8500_DMA_DEV19_USB_OTG_IEP_4_12 = 19,
+ DB8500_DMA_DEV20_SLIM0_CH0_RX_HSI_RX_CH0 = 20,
+ DB8500_DMA_DEV21_SLIM0_CH1_RX_HSI_RX_CH1 = 21,
+ DB8500_DMA_DEV22_SLIM0_CH2_RX_HSI_RX_CH2 = 22,
+ DB8500_DMA_DEV23_SLIM0_CH3_RX_HSI_RX_CH3 = 23,
+ DB8500_DMA_DEV24_SRC_SXA0_RX_TX = 24,
+ DB8500_DMA_DEV25_SRC_SXA1_RX_TX = 25,
+ DB8500_DMA_DEV26_SRC_SXA2_RX_TX = 26,
+ DB8500_DMA_DEV27_SRC_SXA3_RX_TX = 27,
+ DB8500_DMA_DEV28_SD_MM2_RX = 28,
+ DB8500_DMA_DEV29_SD_MM0_RX = 29,
+ DB8500_DMA_DEV30_MSP1_RX = 30,
+ /* On DB8500v2, MSP3 RX replaces MSP1 RX */
+ DB8500_DMA_DEV30_MSP3_RX = 30,
+ DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX = 31,
+ DB8500_DMA_DEV32_SD_MM1_RX = 32,
+ DB8500_DMA_DEV33_SPI2_RX = 33,
+ DB8500_DMA_DEV34_I2C3_RX2 = 34,
+ DB8500_DMA_DEV35_SPI1_RX = 35,
+ DB8500_DMA_DEV36_USB_OTG_IEP_3_11 = 36,
+ DB8500_DMA_DEV37_USB_OTG_IEP_2_10 = 37,
+ DB8500_DMA_DEV38_USB_OTG_IEP_1_9 = 38,
+ DB8500_DMA_DEV39_USB_OTG_IEP_8 = 39,
+ DB8500_DMA_DEV40_SPI3_RX = 40,
+ DB8500_DMA_DEV41_SD_MM3_RX = 41,
+ DB8500_DMA_DEV42_SD_MM4_RX = 42,
+ DB8500_DMA_DEV43_SD_MM5_RX = 43,
+ DB8500_DMA_DEV44_SRC_SXA4_RX_TX = 44,
+ DB8500_DMA_DEV45_SRC_SXA5_RX_TX = 45,
+ DB8500_DMA_DEV46_SLIM0_CH8_RX_SRC_SXA6_RX_TX = 46,
+ DB8500_DMA_DEV47_SLIM0_CH9_RX_SRC_SXA7_RX_TX = 47,
+ DB8500_DMA_DEV48_CAC1_RX = 48,
+ /* 49, 50 and 51 are not used */
+ DB8500_DMA_DEV52_SLIM0_CH4_RX_HSI_RX_CH4 = 52,
+ DB8500_DMA_DEV53_SLIM0_CH5_RX_HSI_RX_CH5 = 53,
+ DB8500_DMA_DEV54_SLIM0_CH6_RX_HSI_RX_CH6 = 54,
+ DB8500_DMA_DEV55_SLIM0_CH7_RX_HSI_RX_CH7 = 55,
+ /* 56, 57, 58, 59 and 60 are not used */
+ DB8500_DMA_DEV61_CAC0_RX = 61,
+ /* 62 and 63 are not used */
};
enum dma_dest_dev_type {
- STEDMA40_DEV_SPI0_TX = 0,
- STEDMA40_DEV_SD_MMC0_TX = 1,
- STEDMA40_DEV_SD_MMC1_TX = 2,
- STEDMA40_DEV_SD_MMC2_TX = 3,
- STEDMA40_DEV_I2C1_TX = 4,
- STEDMA40_DEV_I2C3_TX = 5,
- STEDMA40_DEV_I2C2_TX = 6,
- STEDMA50_DEV_I2C4_TX = 7, /* Only on V1 */
- STEDMA40_DEV_SSP0_TX = 8,
- STEDMA40_DEV_SSP1_TX = 9,
- /* TX channel 10 is unused */
- STEDMA40_DEV_UART2_TX = 11,
- STEDMA40_DEV_UART1_TX = 12,
- STEDMA40_DEV_UART0_TX= 13,
- STEDMA40_DEV_MSP2_TX = 14,
- STEDMA40_DEV_I2C0_TX = 15,
- STEDMA40_DEV_USB_OTG_OEP_8 = 16,
- STEDMA40_DEV_USB_OTG_OEP_1_9 = 17,
- STEDMA40_DEV_USB_OTG_OEP_2_10= 18,
- STEDMA40_DEV_USB_OTG_OEP_3_11 = 19,
- STEDMA40_DEV_SLIM0_CH0_TX_HSI_TX_CH0 = 20,
- STEDMA40_DEV_SLIM0_CH1_TX_HSI_TX_CH1 = 21,
- STEDMA40_DEV_SLIM0_CH2_TX_HSI_TX_CH2 = 22,
- STEDMA40_DEV_SLIM0_CH3_TX_HSI_TX_CH3 = 23,
- STEDMA40_DEV_DST_SXA0_RX_TX = 24,
- STEDMA40_DEV_DST_SXA1_RX_TX = 25,
- STEDMA40_DEV_DST_SXA2_RX_TX = 26,
- STEDMA40_DEV_DST_SXA3_RX_TX = 27,
- STEDMA40_DEV_SD_MM2_TX = 28,
- STEDMA40_DEV_SD_MM0_TX = 29,
- STEDMA40_DEV_MSP1_TX = 30,
- /*
- * This channel is either SlimBus or MSP,
- * never both at the same time.
- */
- STEDMA40_SLIM0_CH0_TX = 31,
- STEDMA40_DEV_MSP0_TX = 31,
- STEDMA40_DEV_SD_MM1_TX = 32,
- STEDMA40_DEV_SPI2_TX = 33,
- /* Secondary I2C3 channel */
- STEDMA40_DEV_I2C3_TX2 = 34,
- STEDMA40_DEV_SPI1_TX = 35,
- STEDMA40_DEV_USB_OTG_OEP_4_12 = 36,
- STEDMA40_DEV_USB_OTG_OEP_5_13 = 37,
- STEDMA40_DEV_USB_OTG_OEP_6_14 = 38,
- STEDMA40_DEV_USB_OTG_OEP_7_15 = 39,
- STEDMA40_DEV_SPI3_TX = 40,
- STEDMA40_DEV_SD_MM3_TX = 41,
- STEDMA40_DEV_SD_MM4_TX = 42,
- STEDMA40_DEV_SD_MM5_TX = 43,
- STEDMA40_DEV_DST_SXA4_RX_TX = 44,
- STEDMA40_DEV_DST_SXA5_RX_TX = 45,
- STEDMA40_DEV_DST_SXA6_RX_TX = 46,
- STEDMA40_DEV_DST_SXA7_RX_TX = 47,
- STEDMA40_DEV_CAC1_TX = 48,
- STEDMA40_DEV_CAC1_TX_HAC1_TX = 49,
- STEDMA40_DEV_HAC1_TX = 50,
- STEDMA40_MEMCPY_TX_0 = 51,
- STEDMA40_DEV_SLIM1_CH0_TX_HSI_TX_CH4 = 52,
- STEDMA40_DEV_SLIM1_CH1_TX_HSI_TX_CH5 = 53,
- STEDMA40_DEV_SLIM1_CH2_TX_HSI_TX_CH6 = 54,
- STEDMA40_DEV_SLIM1_CH3_TX_HSI_TX_CH7 = 55,
- STEDMA40_MEMCPY_TX_1 = 56,
- STEDMA40_MEMCPY_TX_2 = 57,
- STEDMA40_MEMCPY_TX_3 = 58,
- STEDMA40_MEMCPY_TX_4 = 59,
- STEDMA40_MEMCPY_TX_5 = 60,
- STEDMA40_DEV_CAC0_TX = 61,
- STEDMA40_DEV_CAC0_TX_HAC0_TX = 62,
- STEDMA40_DEV_HAC0_TX = 63,
+ DB8500_DMA_DEV0_SPI0_TX = 0,
+ DB8500_DMA_DEV1_SD_MMC0_TX = 1,
+ DB8500_DMA_DEV2_SD_MMC1_TX = 2,
+ DB8500_DMA_DEV3_SD_MMC2_TX = 3,
+ DB8500_DMA_DEV4_I2C1_TX = 4,
+ DB8500_DMA_DEV5_I2C3_TX = 5,
+ DB8500_DMA_DEV6_I2C2_TX = 6,
+ DB8500_DMA_DEV7_I2C4_TX = 7, /* Only on V1 and later */
+ DB8500_DMA_DEV8_SSP0_TX = 8,
+ DB8500_DMA_DEV9_SSP1_TX = 9,
+	/* 10 is not used */
+ DB8500_DMA_DEV11_UART2_TX = 11,
+ DB8500_DMA_DEV12_UART1_TX = 12,
+ DB8500_DMA_DEV13_UART0_TX = 13,
+ DB8500_DMA_DEV14_MSP2_TX = 14,
+ DB8500_DMA_DEV15_I2C0_TX = 15,
+ DB8500_DMA_DEV16_USB_OTG_OEP_7_15 = 16,
+ DB8500_DMA_DEV17_USB_OTG_OEP_6_14 = 17,
+ DB8500_DMA_DEV18_USB_OTG_OEP_5_13 = 18,
+ DB8500_DMA_DEV19_USB_OTG_OEP_4_12 = 19,
+ DB8500_DMA_DEV20_SLIM0_CH0_TX_HSI_TX_CH0 = 20,
+ DB8500_DMA_DEV21_SLIM0_CH1_TX_HSI_TX_CH1 = 21,
+ DB8500_DMA_DEV22_SLIM0_CH2_TX_HSI_TX_CH2 = 22,
+ DB8500_DMA_DEV23_SLIM0_CH3_TX_HSI_TX_CH3 = 23,
+ DB8500_DMA_DEV24_DST_SXA0_RX_TX = 24,
+ DB8500_DMA_DEV25_DST_SXA1_RX_TX = 25,
+ DB8500_DMA_DEV26_DST_SXA2_RX_TX = 26,
+ DB8500_DMA_DEV27_DST_SXA3_RX_TX = 27,
+ DB8500_DMA_DEV28_SD_MM2_TX = 28,
+ DB8500_DMA_DEV29_SD_MM0_TX = 29,
+ DB8500_DMA_DEV30_MSP1_TX = 30,
+ DB8500_DMA_DEV31_MSP0_TX_SLIM0_CH0_TX = 31,
+ DB8500_DMA_DEV32_SD_MM1_TX = 32,
+ DB8500_DMA_DEV33_SPI2_TX = 33,
+ DB8500_DMA_DEV34_I2C3_TX2 = 34,
+ DB8500_DMA_DEV35_SPI1_TX = 35,
+ DB8500_DMA_DEV36_USB_OTG_OEP_3_11 = 36,
+ DB8500_DMA_DEV37_USB_OTG_OEP_2_10 = 37,
+ DB8500_DMA_DEV38_USB_OTG_OEP_1_9 = 38,
+ DB8500_DMA_DEV39_USB_OTG_OEP_8 = 39,
+ DB8500_DMA_DEV40_SPI3_TX = 40,
+ DB8500_DMA_DEV41_SD_MM3_TX = 41,
+ DB8500_DMA_DEV42_SD_MM4_TX = 42,
+ DB8500_DMA_DEV43_SD_MM5_TX = 43,
+ DB8500_DMA_DEV44_DST_SXA4_RX_TX = 44,
+ DB8500_DMA_DEV45_DST_SXA5_RX_TX = 45,
+ DB8500_DMA_DEV46_SLIM0_CH8_TX_DST_SXA6_RX_TX = 46,
+ DB8500_DMA_DEV47_SLIM0_CH9_TX_DST_SXA7_RX_TX = 47,
+ DB8500_DMA_DEV48_CAC1_TX = 48,
+ DB8500_DMA_DEV49_CAC1_TX_HAC1_TX = 49,
+ DB8500_DMA_DEV50_HAC1_TX = 50,
+ DB8500_DMA_MEMCPY_TX_0 = 51,
+ DB8500_DMA_DEV52_SLIM1_CH4_TX_HSI_TX_CH4 = 52,
+ DB8500_DMA_DEV53_SLIM1_CH5_TX_HSI_TX_CH5 = 53,
+ DB8500_DMA_DEV54_SLIM1_CH6_TX_HSI_TX_CH6 = 54,
+ DB8500_DMA_DEV55_SLIM1_CH7_TX_HSI_TX_CH7 = 55,
+ DB8500_DMA_MEMCPY_TX_1 = 56,
+ DB8500_DMA_MEMCPY_TX_2 = 57,
+ DB8500_DMA_MEMCPY_TX_3 = 58,
+ DB8500_DMA_MEMCPY_TX_4 = 59,
+ DB8500_DMA_MEMCPY_TX_5 = 60,
+ DB8500_DMA_DEV61_CAC0_TX = 61,
+ DB8500_DMA_DEV62_CAC0_TX_HAC0_TX = 62,
+ DB8500_DMA_DEV63_HAC0_TX = 63,
};
#endif
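
Editor's note: the renamed identifiers above are consumed through the DMA40 platform data when a client requests a channel. The sketch below is a rough illustration only: the stedma40_chan_cfg structure, its .dir/.src_dev_type fields and the STEDMA40_PERIPH_TO_MEM direction value are recalled from the <plat/ste_dma40.h> header of this era and should be read as assumptions, as is the choice of the SD/MMC0 receive event line; the remaining fields are deliberately omitted.

	#include <plat/ste_dma40.h>		/* assumed location of stedma40_chan_cfg */
	#include "ste-dma40-db8500.h"

	/* Sketch: select the DB8500 event line for SD/MMC0 receive DMA. */
	static struct stedma40_chan_cfg example_mmc0_rx_cfg = {
		.dir		= STEDMA40_PERIPH_TO_MEM,
		.src_dev_type	= DB8500_DMA_DEV1_SD_MMC0_RX,
		/* destination side, FIFO widths etc. left out of this illustration */
	};
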
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 577df6c..efb1270 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -227,7 +227,13 @@
int i;
#ifdef CONFIG_CACHE_L2X0
- l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+ void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
+
+ /* set RAM latencies to 1 cycle for this core tile. */
+ writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+ writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+ l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
diff --git a/arch/arm/mach-vexpress/include/mach/smp.h b/arch/arm/mach-vexpress/include/mach/smp.h
index 72a9621..5a6da4f 100644
--- a/arch/arm/mach-vexpress/include/mach/smp.h
+++ b/arch/arm/mach-vexpress/include/mach/smp.h
@@ -2,14 +2,7 @@
#define __MACH_SMP_H
#include <asm/hardware/gic.h>
-
-#define hard_smp_processor_id() \
- ({ \
- unsigned int cpunum; \
- __asm__("mrc p15, 0, %0, c0, c0, 5" \
- : "=r" (cpunum)); \
- cpunum &= 0x0F; \
- })
+#include <asm/smp_mpidr.h>
/*
* We use IRQ1 as the IPI
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 33c3f57..a0a2928 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -398,7 +398,7 @@
# ARMv6k
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
- depends on CPU_V6
+ depends on CPU_V6 || CPU_V7
default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
help
Say Y here if your ARMv6 processor supports the 'K' extension.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index d073b64..724ba3b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -885,8 +885,23 @@
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
- else
- set_cr(cr_no_alignment);
+ else {
+ /*
+ * We're about to disable the alignment trap and return to
+ * user space. But if an interrupt occurs before actually
+ * reaching user space, then the IRQ vector entry code will
+ * notice that we were still in kernel space and therefore
+ * the alignment trap won't be re-enabled in that case as it
+ * is presumed to be always on from kernel space.
+ * Let's prevent that race by disabling interrupts here (they
+ * are disabled on the way back to user space anyway in
+ * entry-common.S) and disable the alignment trap only if
+ * there is no work pending for this thread.
+ */
+ raw_local_irq_disable();
+ if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+ set_cr(cr_no_alignment);
+ }
return 0;
}
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 86aa689..99fa688 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -21,18 +21,22 @@
#define D_CACHE_LINE_SIZE 32
#define BTB_FLUSH_SIZE 8
-#ifdef CONFIG_ARM_ERRATA_411920
/*
- * Invalidate the entire I cache (this code is a workaround for the ARM1136
- * erratum 411920 - Invalidate Instruction Cache operation can fail. This
- * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
+ * v6_flush_icache_all()
*
- * Registers:
- * r0 - set to 0
- * r1 - corrupted
+ * Flush the whole I-cache.
+ *
+ * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
+ * This erratum is present in 1136, 1156 and 1176. It does not affect the
+ * MPCore.
+ *
+ * Registers:
+ * r0 - set to 0
+ * r1 - corrupted
*/
-ENTRY(v6_icache_inval_all)
+ENTRY(v6_flush_icache_all)
mov r0, #0
+#ifdef CONFIG_ARM_ERRATA_411920
mrs r1, cpsr
cpsid ifa @ disable interrupts
mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
@@ -43,8 +47,11 @@
.rept 11 @ ARM Ltd recommends at least
nop @ 11 NOPs
.endr
- mov pc, lr
+#else
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
#endif
+ mov pc, lr
+ENDPROC(v6_flush_icache_all)
/*
* v6_flush_cache_all()
@@ -60,7 +67,7 @@
#ifndef CONFIG_ARM_ERRATA_411920
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
#else
- b v6_icache_inval_all
+ b v6_flush_icache_all
#endif
#else
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
@@ -138,7 +145,7 @@
#ifndef CONFIG_ARM_ERRATA_411920
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
#else
- b v6_icache_inval_all
+ b v6_flush_icache_all
#endif
#else
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
@@ -312,6 +319,7 @@
.type v6_cache_fns, #object
ENTRY(v6_cache_fns)
+ .long v6_flush_icache_all
.long v6_flush_kern_cache_all
.long v6_flush_user_cache_all
.long v6_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157..a3ebf7a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -18,6 +18,21 @@
#include "proc-macros.S"
/*
+ * v7_flush_icache_all()
+ *
+ * Flush the whole I-cache.
+ *
+ * Registers:
+ * r0 - set to 0
+ */
+ENTRY(v7_flush_icache_all)
+ mov r0, #0
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
+ mov pc, lr
+ENDPROC(v7_flush_icache_all)
+
+/*
* v7_flush_dcache_all()
*
* Flush the whole D-cache.
@@ -91,11 +106,8 @@
THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
-#else
- mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
-#endif
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
@@ -171,11 +183,8 @@
cmp r0, r1
blo 1b
mov r0, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
-#else
- mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
-#endif
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
dsb
isb
mov pc, lr
@@ -309,6 +318,7 @@
.type v7_cache_fns, #object
ENTRY(v7_cache_fns)
+ .long v7_flush_icache_all
.long v7_flush_kern_cache_all
.long v7_flush_user_cache_all
.long v7_flush_user_cache_range
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 598c51a..b806151 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@
{
void *kto = kmap_atomic(to, KM_USER1);
- if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index f55fa10..bdba6c6 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
- if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
/* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9920c0a..649bbcd 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@
{
void *kto = kmap_atomic(to, KM_USER1);
- if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c704eed..e4dd064 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -229,6 +229,8 @@
}
} while (size -= PAGE_SIZE);
+ dsb();
+
return (void *)c->vm_start;
}
return NULL;
@@ -521,6 +523,12 @@
outer_inv_range(paddr, paddr + size);
dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+ /*
+ * Mark the D-cache clean for this page to avoid extra flushing.
+ */
+ if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+ set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906de..8440d95 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -28,6 +28,7 @@
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
+#if __LINUX_ARM_ARCH__ < 6
/*
* We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on.
@@ -141,7 +142,7 @@
* a page table, or changing an existing PTE. Basically, there are two
* things that we need to take care of:
*
- * 1. If PG_dcache_dirty is set for the page, we need to ensure
+ * 1. If PG_dcache_clean is not set for the page, we need to ensure
* that any cache entries for the kernels virtual memory
* range are written back to the page.
* 2. If we have multiple shared mappings of the same space in
@@ -168,10 +169,8 @@
return;
mapping = page_mapping(page);
-#ifndef CONFIG_SMP
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
-#endif
if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +178,7 @@
__flush_icache_all();
}
}
+#endif /* __LINUX_ARM_ARCH__ < 6 */
/*
* Check whether the write buffer has physical address aliasing
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6844cb..391ffae 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
+#include <asm/smp_plat.h>
#include "mm.h"
@@ -39,6 +40,18 @@
: "cc");
}
+static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
+{
+ unsigned long colour = CACHE_COLOUR(vaddr);
+ unsigned long offset = vaddr & (PAGE_SIZE - 1);
+ unsigned long to;
+
+ set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
+ to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
+ flush_tlb_kernel_page(to);
+ flush_icache_range(to, to + len);
+}
+
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
@@ -89,16 +102,16 @@
if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
__flush_icache_all();
}
+
#else
-#define flush_pfn_alias(pfn,vaddr) do { } while (0)
+#define flush_pfn_alias(pfn,vaddr) do { } while (0)
+#define flush_icache_alias(pfn,vaddr,len) do { } while (0)
#endif
-#ifdef CONFIG_SMP
static void flush_ptrace_access_other(void *args)
{
__flush_icache_all();
}
-#endif
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -118,15 +131,16 @@
return;
}
- /* VIPT non-aliasing cache */
+ /* VIPT non-aliasing D-cache */
if (vma->vm_flags & VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
- __cpuc_coherent_kern_range(addr, addr + len);
-#ifdef CONFIG_SMP
+ if (icache_is_vipt_aliasing())
+ flush_icache_alias(page_to_pfn(page), uaddr, len);
+ else
+ __cpuc_coherent_kern_range(addr, addr + len);
if (cache_ops_need_broadcast())
smp_call_function(flush_ptrace_access_other,
NULL, 1);
-#endif
}
}
@@ -215,6 +229,36 @@
flush_dcache_mmap_unlock(mapping);
}
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+ unsigned long pfn;
+ struct page *page;
+ struct address_space *mapping;
+
+ if (!pte_present_user(pteval))
+ return;
+ if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+ /* only flush non-aliasing VIPT caches for exec mappings */
+ return;
+ pfn = pte_pfn(pteval);
+ if (!pfn_valid(pfn))
+ return;
+
+ page = pfn_to_page(pfn);
+ if (cache_is_vipt_aliasing())
+ mapping = page_mapping(page);
+ else
+ mapping = NULL;
+
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ __flush_dcache_page(mapping, page);
+ /* pte_exec() already checked above for non-aliasing VIPT cache */
+ if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+ __flush_icache_all();
+}
+#endif
+
/*
* Ensure cache coherency between kernel mapping and userspace mapping
* of this page.
@@ -246,17 +290,16 @@
mapping = page_mapping(page);
-#ifndef CONFIG_SMP
- if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
- set_bit(PG_dcache_dirty, &page->flags);
- else
-#endif
- {
+ if (!cache_ops_need_broadcast() &&
+ mapping && !mapping_mapped(mapping))
+ clear_bit(PG_dcache_clean, &page->flags);
+ else {
__flush_dcache_page(mapping, page);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, page);
else if (mapping)
__flush_icache_all();
+ set_bit(PG_dcache_clean, &page->flags);
}
}
EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7185b00..36c4553 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -277,7 +277,7 @@
/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
- memblock_reserve(__pa(_data), _end - _data);
+ memblock_reserve(__pa(_sdata), _end - _sdata);
#else
memblock_reserve(__pa(_stext), _end - _stext);
#endif
@@ -545,7 +545,7 @@
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(_data, _edata));
+ MLK_ROUNDUP(_sdata, _edata));
#undef MLK
#undef MLM
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e1c4f6..e233581 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
+#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/sections.h>
@@ -246,6 +247,9 @@
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_USER | L_PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -254,6 +258,9 @@
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_NONCACHED] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+ .prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -303,9 +310,8 @@
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
-#ifdef CONFIG_SMP
- cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+ if (is_smp())
+ cachepolicy = CPOLICY_WRITEALLOC;
/*
* Strip out features not present on earlier architectures.
@@ -399,21 +405,22 @@
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
-#ifndef CONFIG_SMP
/*
* Only use write-through for non-SMP systems
*/
- if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+ if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
/*
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
- if (arch_is_coherent() && cpu_is_xsc3())
+ if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
/*
* ARMv6 and above have extended page tables.
*/
@@ -426,20 +433,23 @@
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-#ifdef CONFIG_SMP
- /*
- * Mark memory with the "shared" attribute for SMP systems
- */
- user_pgprot |= L_PTE_SHARED;
- kern_pgprot |= L_PTE_SHARED;
- vecs_pgprot |= L_PTE_SHARED;
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
- mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-#endif
+ if (is_smp()) {
+ /*
+ * Mark memory with the "shared" attribute
+ * for SMP systems
+ */
+ user_pgprot |= L_PTE_SHARED;
+ kern_pgprot |= L_PTE_SHARED;
+ vecs_pgprot |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
}
/*
@@ -475,6 +485,8 @@
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
@@ -498,6 +510,19 @@
}
}
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ if (!pfn_valid(pfn))
+ return pgprot_noncached(vma_prot);
+ else if (file->f_flags & O_SYNC)
+ return pgprot_writecombine(vma_prot);
+ return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)
@@ -802,8 +827,7 @@
* rather difficult.
*/
reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
- } else if (tlb_ops_need_broadcast()) {
+ } else if (is_smp() && tlb_ops_need_broadcast()) {
/*
* kmap_high needs to occasionally flush TLB entries,
* however, if the TLB entries need to be broadcast
@@ -813,7 +837,6 @@
* (must not be called with irqs off)
*/
reason = "without hardware TLB ops broadcasting";
-#endif
}
if (reason) {
printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
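
Editor's note: when CONFIG_ARM_DMA_MEM_BUFFERABLE is set, the phys_mem_access_prot() hook added above decides what user space gets from /dev/mem: addresses outside RAM stay strongly uncached, RAM mapped through a descriptor opened with O_SYNC becomes write-combined, and plain mappings keep the default (cacheable) attributes. A user-space sketch follows; the physical address 0x80000000 and the function name are illustrative only.

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Illustration only: 0x80000000 stands in for some physical RAM address. */
	static void *example_map_write_combined(size_t len)
	{
		int fd = open("/dev/mem", O_RDWR | O_SYNC);	/* O_SYNC -> write-combine on RAM */
		void *p;

		if (fd < 0)
			return NULL;
		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0x80000000);
		close(fd);
		return (p == MAP_FAILED) ? NULL : p;
	}
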
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 22aac85..b95662d 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -30,13 +30,10 @@
#define TTB_RGN_WT (2 << 3)
#define TTB_RGN_WB (3 << 3)
-#ifndef CONFIG_SMP
-#define TTB_FLAGS TTB_RGN_WBWA
-#define PMD_FLAGS PMD_SECT_WB
-#else
-#define TTB_FLAGS TTB_RGN_WBWA|TTB_S
-#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_UP TTB_RGN_WBWA
+#define PMD_FLAGS_UP PMD_SECT_WB
+#define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
ENTRY(cpu_v6_proc_init)
mov pc, lr
@@ -97,7 +94,8 @@
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- orr r0, r0, #TTB_FLAGS
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
@@ -156,9 +154,11 @@
*/
__v6_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode
+ ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode
+ ALT_UP(nop)
orr r0, r0, #0x20
- mcr p15, 0, r0, c1, c0, 1
+ ALT_SMP(mcr p15, 0, r0, c1, c0, 1)
+ ALT_UP(nop)
#endif
mov r0, #0
@@ -169,7 +169,8 @@
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
- orr r4, r4, #TTB_FLAGS
+ ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
+ ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
adr r5, v6_crval
@@ -225,10 +226,16 @@
__v6_proc_info:
.long 0x0007b000
.long 0x0007f000
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
@@ -249,10 +256,16 @@
__pj4_v6_proc_info:
.long 0x560f5810
.long 0xff0ffff0
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6a8506d..df422fe 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -30,15 +30,13 @@
#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
-#ifndef CONFIG_SMP
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS PMD_SECT_WB
-#else
+#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP PMD_SECT_WB
+
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
ENTRY(cpu_v7_proc_init)
mov pc, lr
@@ -105,7 +103,8 @@
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- orr r0, r0, #TTB_FLAGS
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
@@ -186,13 +185,15 @@
* It is assumed that:
* - cache type register is implemented
*/
-__v7_setup:
+__v7_ca9mp_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1
+ ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
+ ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
#endif
+__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
bl v7_flush_dcache_all
@@ -201,11 +202,16 @@
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
and r10, r0, #0xff000000 @ ARM?
teq r10, #0x41000000
- bne 2f
+ bne 3f
and r5, r0, #0x00f00000 @ variant
and r6, r0, #0x0000000f @ revision
- orr r0, r6, r5, lsr #20-4 @ combine variant and revision
+ orr r6, r6, r5, lsr #20-4 @ combine variant and revision
+ ubfx r0, r0, #4, #12 @ primary part number
+ /* Cortex-A8 Errata */
+ ldr r10, =0x00000c08 @ Cortex-A8 primary part number
+ teq r0, r10
+ bne 2f
#ifdef CONFIG_ARM_ERRATA_430973
teq r5, #0x00100000 @ only present in r1p*
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
@@ -213,21 +219,42 @@
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
- teq r0, #0x20 @ only present in r2p0
+ teq r6, #0x20 @ only present in r2p0
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
orreq r10, r10, #(1 << 5) @ set L1NEON to 1
orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
- teq r0, #0x20 @ only present in r2p0
+ teq r6, #0x20 @ only present in r2p0
mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r10, #1 << 22
orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
+ b 3f
-2: mov r10, #0
+ /* Cortex-A9 Errata */
+2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
+ teq r0, r10
+ bne 3f
+#ifdef CONFIG_ARM_ERRATA_742230
+ cmp r6, #0x22 @ only present up to r2p2
+ mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrle r10, r10, #1 << 4 @ set bit #4
+ mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+ teq r6, #0x20 @ present in r2p0
+ teqne r6, #0x21 @ present in r2p1
+ teqne r6, #0x22 @ present in r2p2
+ mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orreq r10, r10, #1 << 12 @ set bit #12
+ orreq r10, r10, #1 << 22 @ set bit #22
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+
+3: mov r10, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#endif
@@ -235,7 +262,8 @@
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
- orr r4, r4, #TTB_FLAGS
+ ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
+ ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
@@ -323,6 +351,35 @@
.section ".proc.info.init", #alloc, #execinstr
+ .type __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+ .long 0x410fc090 @ Required ID value
+ .long 0xff0ffff0 @ Mask for ID
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_UP)
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_XN | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __v7_ca9mp_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_v7_name
+ .long v7_processor_functions
+ .long v7wbi_tlb_fns
+ .long v6_user_fns
+ .long v7_cache_fns
+ .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
/*
* Match any ARMv7 processor core.
*/
@@ -330,10 +387,16 @@
__v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index f3f288a..53cd5b4 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
@@ -41,20 +42,15 @@
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
-#else
- mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
-#endif
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov ip, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
-#else
- mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
-#endif
+ ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
+ ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -74,20 +70,14 @@
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
-#else
- mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
-#endif
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov r2, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
-#else
- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
-#endif
+ ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
+ ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB
dsb
isb
mov pc, lr
@@ -99,5 +89,6 @@
ENTRY(v7wbi_tlb_fns)
.long v7wbi_flush_user_tlb_range
.long v7wbi_flush_kern_tlb_range
- .long v7wbi_tlb_flags
+ ALT_SMP(.long v7wbi_tlb_flags_smp)
+ ALT_UP(.long v7wbi_tlb_flags_up)
.size v7wbi_tlb_fns, . - v7wbi_tlb_fns
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index 0527e65..6785db4 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -43,6 +43,7 @@
config ARCH_MX5
bool "MX5-based"
select CPU_V7
+ select ARM_L1_CACHE_SHIFT_6
help
This enables support for systems based on the Freescale i.MX51 family
diff --git a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
index 634e3f4..656acb4 100644
--- a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
+++ b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
@@ -37,9 +37,9 @@
* mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
*/
-extern void eukrea_mbimx25_baseboard_init(void);
+extern void eukrea_mbimxsd25_baseboard_init(void);
extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimx35_baseboard_init(void);
+extern void eukrea_mbimxsd35_baseboard_init(void);
extern void eukrea_mbimx51_baseboard_init(void);
#endif
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index b3da9aa..3703ab2 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -164,8 +164,9 @@
return -EAGAIN;
for (i = 0; i < 4; i++) {
- v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
- __raw_writel(v, TZIC_WAKEUP0(i));
+ v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
+ wakeup_intr[i];
+ __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
}
return 0;
diff --git a/arch/arm/plat-nomadik/gpio.c b/arch/arm/plat-nomadik/gpio.c
index 977c8f9..85e6fd21 100644
--- a/arch/arm/plat-nomadik/gpio.c
+++ b/arch/arm/plat-nomadik/gpio.c
@@ -102,6 +102,22 @@
writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
}
+static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int val)
+{
+ if (val)
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS);
+ else
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC);
+}
+
+static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int val)
+{
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+}
+
static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
pin_cfg_t cfg)
{
@@ -118,20 +134,29 @@
[3] /* illegal */ = "??"
};
static const char *slpmnames[] = {
- [NMK_GPIO_SLPM_INPUT] = "input",
- [NMK_GPIO_SLPM_NOCHANGE] = "no-change",
+ [NMK_GPIO_SLPM_INPUT] = "input/wakeup",
+ [NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup",
};
int pin = PIN_NUM(cfg);
int pull = PIN_PULL(cfg);
int af = PIN_ALT(cfg);
int slpm = PIN_SLPM(cfg);
+ int output = PIN_DIR(cfg);
+ int val = PIN_VAL(cfg);
- dev_dbg(nmk_chip->chip.dev, "pin %d: af %s, pull %s, slpm %s\n",
- pin, afnames[af], pullnames[pull], slpmnames[slpm]);
+ dev_dbg(nmk_chip->chip.dev, "pin %d: af %s, pull %s, slpm %s (%s%s)\n",
+ pin, afnames[af], pullnames[pull], slpmnames[slpm],
+ output ? "output " : "input",
+ output ? (val ? "high" : "low") : "");
- __nmk_gpio_make_input(nmk_chip, offset);
- __nmk_gpio_set_pull(nmk_chip, offset, pull);
+ if (output)
+ __nmk_gpio_make_output(nmk_chip, offset, val);
+ else {
+ __nmk_gpio_make_input(nmk_chip, offset);
+ __nmk_gpio_set_pull(nmk_chip, offset, pull);
+ }
+
__nmk_gpio_set_slpm(nmk_chip, offset, slpm);
__nmk_gpio_set_mode(nmk_chip, offset, af);
}
@@ -200,6 +225,10 @@
* changed to an input (with pullup/down enabled) in sleep and deep sleep. If
* @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was
* configured even when in sleep and deep sleep.
+ *
+ * From DB8500v2 onwards, this setting loses its previous meaning and instead
+ * indicates whether wakeup detection is enabled on the pin. Note that
+ * enable_irq_wake() will automatically enable wakeup detection.
*/
int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
{
@@ -367,7 +396,27 @@
static int nmk_gpio_irq_set_wake(unsigned int irq, unsigned int on)
{
- return nmk_gpio_irq_modify(irq, WAKE, on);
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ int gpio;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(irq);
+ nmk_chip = get_irq_chip_data(irq);
+ if (!nmk_chip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+#ifdef CONFIG_ARCH_U8500
+ if (cpu_is_u8500v2()) {
+ __nmk_gpio_set_slpm(nmk_chip, gpio,
+ on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
+ : NMK_GPIO_SLPM_WAKEUP_DISABLE);
+ }
+#endif
+ __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ return 0;
}
static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
@@ -495,12 +544,8 @@
{
struct nmk_gpio_chip *nmk_chip =
container_of(chip, struct nmk_gpio_chip, chip);
- u32 bit = 1 << offset;
- if (val)
- writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
- else
- writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
+ __nmk_gpio_set_output(nmk_chip, offset, val);
}
static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
@@ -509,8 +554,7 @@
struct nmk_gpio_chip *nmk_chip =
container_of(chip, struct nmk_gpio_chip, chip);
- writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
- nmk_gpio_set_output(chip, offset, val);
+ __nmk_gpio_make_output(nmk_chip, offset, val);
return 0;
}
@@ -534,7 +578,7 @@
.can_sleep = 0,
};
-static int __init nmk_gpio_probe(struct platform_device *dev)
+static int __devinit nmk_gpio_probe(struct platform_device *dev)
{
struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
struct nmk_gpio_chip *nmk_chip;
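
Editor's note: with the reworked nmk_gpio_irq_set_wake() above, drivers no longer touch the SLPM/wakeup bits directly; on DB8500v2 the generic wakeup API is enough, assuming the GPIO interrupt chip wires this function up as its set_wake hook (as the name suggests). The GPIO number below is an arbitrary illustration.

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	#define EXAMPLE_WAKEUP_GPIO	85	/* arbitrary pin for illustration */

	static int example_arm_wakeup(void)
	{
		/* Ends up in nmk_gpio_irq_set_wake(), which also flips SLPM on DB8500v2. */
		return enable_irq_wake(gpio_to_irq(EXAMPLE_WAKEUP_GPIO));
	}
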
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
index aba3551..67b113d 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio.h
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -65,7 +65,9 @@
/* Sleep mode */
enum nmk_gpio_slpm {
NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
NMK_GPIO_SLPM_NOCHANGE,
+ NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
};
extern int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode);
diff --git a/arch/arm/plat-nomadik/include/plat/pincfg.h b/arch/arm/plat-nomadik/include/plat/pincfg.h
index 7eed11c..8c5ae3f 100644
--- a/arch/arm/plat-nomadik/include/plat/pincfg.h
+++ b/arch/arm/plat-nomadik/include/plat/pincfg.h
@@ -19,12 +19,16 @@
* bit 9..10 - Alternate Function Selection
* bit 11..12 - Pull up/down state
* bit 13 - Sleep mode behaviour
+ * bit 14 - (sleep mode) Direction
+ * bit 15 - (sleep mode) Value (if output)
*
* to facilitate the definition, the following macros are provided
*
* PIN_CFG_DEFAULT - default config (0):
* pull up/down = disabled
- * sleep mode = input
+ * sleep mode = input/wakeup
+ * (sleep mode) direction = input
+ * (sleep mode) value = low
*
* PIN_CFG - default config with alternate function
* PIN_CFG_PULL - default config with alternate function and pull up/down
@@ -53,8 +57,36 @@
#define PIN_SLPM_SHIFT 13
#define PIN_SLPM_MASK (0x1 << PIN_SLPM_SHIFT)
#define PIN_SLPM(x) (((x) & PIN_SLPM_MASK) >> PIN_SLPM_SHIFT)
-#define PIN_SLPM_INPUT (NMK_GPIO_SLPM_INPUT << PIN_SLPM_SHIFT)
+#define PIN_SLPM_MAKE_INPUT (NMK_GPIO_SLPM_INPUT << PIN_SLPM_SHIFT)
#define PIN_SLPM_NOCHANGE (NMK_GPIO_SLPM_NOCHANGE << PIN_SLPM_SHIFT)
+/* These two replace the above in DB8500v2+ */
+#define PIN_SLPM_WAKEUP_ENABLE (NMK_GPIO_SLPM_WAKEUP_ENABLE << PIN_SLPM_SHIFT)
+#define PIN_SLPM_WAKEUP_DISABLE (NMK_GPIO_SLPM_WAKEUP_DISABLE << PIN_SLPM_SHIFT)
+
+#define PIN_DIR_SHIFT 14
+#define PIN_DIR_MASK (0x1 << PIN_DIR_SHIFT)
+#define PIN_DIR(x) (((x) & PIN_DIR_MASK) >> PIN_DIR_SHIFT)
+#define PIN_DIR_INPUT (0 << PIN_DIR_SHIFT)
+#define PIN_DIR_OUTPUT (1 << PIN_DIR_SHIFT)
+
+#define PIN_VAL_SHIFT 15
+#define PIN_VAL_MASK (0x1 << PIN_VAL_SHIFT)
+#define PIN_VAL(x) (((x) & PIN_VAL_MASK) >> PIN_VAL_SHIFT)
+#define PIN_VAL_LOW (0 << PIN_VAL_SHIFT)
+#define PIN_VAL_HIGH (1 << PIN_VAL_SHIFT)
+
+/* Shortcuts. Use these instead of separate DIR and VAL. */
+#define PIN_INPUT PIN_DIR_INPUT
+#define PIN_OUTPUT_LOW (PIN_DIR_OUTPUT | PIN_VAL_LOW)
+#define PIN_OUTPUT_HIGH (PIN_DIR_OUTPUT | PIN_VAL_HIGH)
+
+/*
+ * These are the same as the ones above, but should make more sense to the
+ * reader when seen alongside a setting that puts a pin into AF mode.
+ */
+#define PIN_SLPM_INPUT PIN_INPUT
+#define PIN_SLPM_OUTPUT_LOW PIN_OUTPUT_LOW
+#define PIN_SLPM_OUTPUT_HIGH PIN_OUTPUT_HIGH
#define PIN_CFG_DEFAULT (PIN_PULL_NONE | PIN_SLPM_INPUT)
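To see how the new direction and value bits compose and decode, here is a small standalone sketch that copies the shift/mask definitions from the header above; the main() wrapper and the printed strings are illustrative only and not part of the kernel API:

    #include <stdio.h>

    #define PIN_DIR_SHIFT   14
    #define PIN_DIR_MASK    (0x1 << PIN_DIR_SHIFT)
    #define PIN_DIR(x)      (((x) & PIN_DIR_MASK) >> PIN_DIR_SHIFT)
    #define PIN_VAL_SHIFT   15
    #define PIN_VAL_MASK    (0x1 << PIN_VAL_SHIFT)
    #define PIN_VAL(x)      (((x) & PIN_VAL_MASK) >> PIN_VAL_SHIFT)
    #define PIN_OUTPUT_HIGH ((1 << PIN_DIR_SHIFT) | (1 << PIN_VAL_SHIFT))

    int main(void)
    {
            unsigned long cfg = PIN_OUTPUT_HIGH;    /* drive high while sleeping */

            printf("dir=%s val=%s\n",
                   PIN_DIR(cfg) ? "output" : "input",
                   PIN_VAL(cfg) ? "high" : "low");
            return 0;
    }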
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index ea3ca86..aedf9c1 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-nomadik/timer.c
+ * linux/arch/arm/plat-nomadik/timer.c
*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@
cr = readl(mtu_base + MTU_CR(1));
writel(0, mtu_base + MTU_LR(1));
writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
- writel(0x2, mtu_base + MTU_IMSC);
+ writel(1 << 1, mtu_base + MTU_IMSC);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@
{
unsigned long rate;
struct clk *clk0;
- struct clk *clk1;
- u32 cr;
+ u32 cr = MTU_CRn_32BITS;
clk0 = clk_get_sys("mtu0", NULL);
BUG_ON(IS_ERR(clk0));
- clk1 = clk_get_sys("mtu1", NULL);
- BUG_ON(IS_ERR(clk1));
-
clk_enable(clk0);
- clk_enable(clk1);
/*
- * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
- * use a divide-by-16 counter if it's more than 16MHz
+ * The tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or
+ * 133 MHz for ux500.
+ * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
+ * At 32 MHz, the timer (with its 32-bit counter) can be programmed
+ * to wake up at most about 127 s ahead in time. Dividing a 2.4 MHz timer
+ * by 16 would give too low a timer resolution.
*/
- cr = MTU_CRn_32BITS;;
rate = clk_get_rate(clk0);
- if (rate > 16 << 20) {
+ if (rate > 32000000) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;
} else {
@@ -170,15 +168,8 @@
pr_err("timer: failed to initialize clock source %s\n",
nmdk_clksrc.name);
- /* Timer 1 is used for events, fix according to rate */
- cr = MTU_CRn_32BITS;
- rate = clk_get_rate(clk1);
- if (rate > 16 << 20) {
- rate /= 16;
- cr |= MTU_CRn_PRESCALE_16;
- } else {
- cr |= MTU_CRn_PRESCALE_1;
- }
+ /* Timer 1 is used for events */
+
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
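The 32 MHz threshold in the comment above can be sanity-checked with a few lines of arithmetic. The standalone program below only mirrors the rate > 32 MHz test and the 32-bit counter width from this file; the printed figures are approximations meant to show the trade-off between maximum sleep interval and tick resolution, not kernel code:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t counter_max = UINT64_C(1) << 32;   /* 32-bit counter */
            double rates[] = { 2.4e6, 100e6, 133e6 };

            for (int i = 0; i < 3; i++) {
                    double r = rates[i];
                    double div = (r > 32e6) ? 16.0 : 1.0;  /* the > 32 MHz test */

                    printf("%6.1f MHz: /%g -> max sleep %.0f s, tick %.2f us\n",
                           r / 1e6, div,
                           counter_max / (r / div), 1e6 / (r / div));
            }
            return 0;
    }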
diff --git a/arch/arm/plat-omap/include/plat/smp.h b/arch/arm/plat-omap/include/plat/smp.h
index 5177a9c..ecd6a48 100644
--- a/arch/arm/plat-omap/include/plat/smp.h
+++ b/arch/arm/plat-omap/include/plat/smp.h
@@ -18,6 +18,7 @@
#define OMAP_ARCH_SMP_H
#include <asm/hardware/gic.h>
+#include <asm/smp_mpidr.h>
/* Needed for secondary core boot */
extern void omap_secondary_startup(void);
@@ -33,15 +34,4 @@
gic_raise_softirq(mask, 1);
}
-/*
- * Read MPIDR: Multiprocessor affinity register
- */
-#define hard_smp_processor_id() \
- ({ \
- unsigned int cpunum; \
- __asm__("mrc p15, 0, %0, c0, c0, 5" \
- : "=r" (cpunum)); \
- cpunum &= 0x0F; \
- })
-
#endif
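The macro removed above read the multiprocessor affinity register directly; that job now falls to <asm/smp_mpidr.h>. For reference, the deleted logic amounts to the inline-assembly helper below (a sketch reconstructed from the removed lines, not a quote of the new header):

    /* Sketch of the removed helper: read the low nibble of MPIDR (CP15 c0, c0, 5). */
    static inline unsigned int read_cpu_id(void)
    {
            unsigned int cpunum;

            __asm__("mrc p15, 0, %0, c0, c0, 5" : "=r" (cpunum));
            return cpunum & 0x0f;
    }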
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 226b2e8..10b3b4c 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -220,20 +220,7 @@
if (omap_sram_size == 0)
return;
- if (cpu_is_omap24xx()) {
- omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
-
- base = OMAP2_SRAM_PA;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
- }
-
if (cpu_is_omap34xx()) {
- omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
- base = OMAP3_SRAM_PA;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-
/*
* SRAM must be marked as non-cached on OMAP3 since the
* CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@
omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
}
- if (cpu_is_omap44xx()) {
- omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
- base = OMAP4_SRAM_PA;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
- }
- omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
+ omap_sram_io_desc[0].virtual = omap_sram_base;
+ base = omap_sram_start;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+ omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
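The rework above maps whatever omap_sram_base/omap_sram_start/omap_sram_size describe instead of hard-coded per-SoC constants, rounding both the start and the length down to a page boundary. Assuming ROUND_DOWN has its usual power-of-two definition (not shown in this hunk), the effect is roughly:

    #include <stdio.h>

    #define PAGE_SIZE        4096UL
    #define ROUND_DOWN(x, y) ((x) & ~((y) - 1))     /* assumed definition */

    int main(void)
    {
            unsigned long start = 0x40200f80UL;     /* made-up SRAM start */
            unsigned long size  = 0x0000e000UL;     /* made-up SRAM size  */

            printf("map pa 0x%08lx len 0x%lx\n",
                   ROUND_DOWN(start, PAGE_SIZE), ROUND_DOWN(size, PAGE_SIZE));
            return 0;
    }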
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c
index 0732c6c..ef32686 100644
--- a/arch/arm/plat-pxa/pwm.c
+++ b/arch/arm/plat-pxa/pwm.c
@@ -176,7 +176,7 @@
static int __devinit pwm_probe(struct platform_device *pdev)
{
- struct platform_device_id *id = platform_get_device_id(pdev);
+ const struct platform_device_id *id = platform_get_device_id(pdev);
struct pwm_device *pwm, *secondary = NULL;
struct resource *r;
int ret = 0;
diff --git a/arch/arm/plat-s5p/dev-fimc0.c b/arch/arm/plat-s5p/dev-fimc0.c
index d3f1a9b..608770f 100644
--- a/arch/arm/plat-s5p/dev-fimc0.c
+++ b/arch/arm/plat-s5p/dev-fimc0.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -18,7 +19,7 @@
static struct resource s5p_fimc0_resource[] = {
[0] = {
.start = S5P_PA_FIMC0,
- .end = S5P_PA_FIMC0 + SZ_1M - 1,
+ .end = S5P_PA_FIMC0 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -28,9 +29,15 @@
},
};
+static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
+
struct platform_device s5p_device_fimc0 = {
.name = "s5p-fimc",
.id = 0,
.num_resources = ARRAY_SIZE(s5p_fimc0_resource),
.resource = s5p_fimc0_resource,
+ .dev = {
+ .dma_mask = &s5p_fimc0_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
};
diff --git a/arch/arm/plat-s5p/dev-fimc1.c b/arch/arm/plat-s5p/dev-fimc1.c
index 41bd698..76e3a97 100644
--- a/arch/arm/plat-s5p/dev-fimc1.c
+++ b/arch/arm/plat-s5p/dev-fimc1.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -18,7 +19,7 @@
static struct resource s5p_fimc1_resource[] = {
[0] = {
.start = S5P_PA_FIMC1,
- .end = S5P_PA_FIMC1 + SZ_1M - 1,
+ .end = S5P_PA_FIMC1 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -28,9 +29,15 @@
},
};
+static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
+
struct platform_device s5p_device_fimc1 = {
.name = "s5p-fimc",
.id = 1,
.num_resources = ARRAY_SIZE(s5p_fimc1_resource),
.resource = s5p_fimc1_resource,
+ .dev = {
+ .dma_mask = &s5p_fimc1_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
};
diff --git a/arch/arm/plat-s5p/dev-fimc2.c b/arch/arm/plat-s5p/dev-fimc2.c
index dfddeda..24d2981 100644
--- a/arch/arm/plat-s5p/dev-fimc2.c
+++ b/arch/arm/plat-s5p/dev-fimc2.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -18,7 +19,7 @@
static struct resource s5p_fimc2_resource[] = {
[0] = {
.start = S5P_PA_FIMC2,
- .end = S5P_PA_FIMC2 + SZ_1M - 1,
+ .end = S5P_PA_FIMC2 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -28,9 +29,15 @@
},
};
+static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
+
struct platform_device s5p_device_fimc2 = {
.name = "s5p-fimc",
.id = 2,
.num_resources = ARRAY_SIZE(s5p_fimc2_resource),
.resource = s5p_fimc2_resource,
+ .dev = {
+ .dma_mask = &s5p_fimc2_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
};
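The same three-line pattern is repeated for each FIMC instance: a static 64-bit mask object that dev->dma_mask can point at, plus coherent_dma_mask set directly, so both streaming and coherent DMA mappings see a 32-bit-capable device. A generic sketch of the idiom for a hypothetical platform device (the name "foo" and the lack of resources are invented for illustration):

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static u64 foo_dma_mask = DMA_BIT_MASK(32);

    static struct platform_device foo_device = {
            .name = "foo",
            .id   = -1,
            .dev  = {
                    /* streaming mappings consult *dma_mask ... */
                    .dma_mask          = &foo_dma_mask,
                    /* ... coherent allocations consult this one */
                    .coherent_dma_mask = DMA_BIT_MASK(32),
            },
    };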
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 57b68a5..e3d41ea 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -273,13 +273,13 @@
if (!chip)
return -EINVAL;
- off = chip->chip.base - pin;
+ off = pin - chip->chip.base;
shift = off * 2;
reg = chip->base + 0x0C;
drvstr = __raw_readl(reg);
- drvstr = 0xffff & (0x3 << shift);
drvstr = drvstr >> shift;
+ drvstr &= 0x3;
return (__force s5p_gpio_drvstr_t)drvstr;
}
@@ -296,11 +296,12 @@
if (!chip)
return -EINVAL;
- off = chip->chip.base - pin;
+ off = pin - chip->chip.base;
shift = off * 2;
reg = chip->base + 0x0C;
tmp = __raw_readl(reg);
+ tmp &= ~(0x3 << shift);
tmp |= drvstr << shift;
__raw_writel(tmp, reg);
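Both fixes above are the classic read-modify-write of a 2-bit per-pin field: compute the offset from the bank base (pin - base, not base - pin), clear the old field, then OR in the new value. A standalone sketch of that pattern, with the register modelled as a plain variable:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t set_2bit_field(uint32_t reg, unsigned int off, uint32_t val)
    {
            unsigned int shift = off * 2;

            reg &= ~(0x3u << shift);        /* clear the old 2-bit value */
            reg |=  (val & 0x3u) << shift;  /* install the new one       */
            return reg;
    }

    int main(void)
    {
            uint32_t reg = 0xffffffffu;

            printf("0x%08x\n", set_2bit_field(reg, 5, 0x2)); /* -> 0xfffffbff */
            return 0;
    }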
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index db4112c..1c6b929 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -143,12 +143,12 @@
/* Define values for the drvstr available for each gpio pin.
*
* These values control the value of the output signal driver strength,
- * configurable on most pins on the S5C series.
+ * configurable on most pins on the S5P series.
*/
-#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x00)
-#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x01)
-#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x10)
-#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x11)
+#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x0)
+#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x2)
+#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x1)
+#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x3)
/**
* s5c_gpio_get_drvstr() - get the driver strength value of a gpio pin
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 48cbdcb..55590a4 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Mon Jul 12 21:10:14 2010
+# Last update: Thu Sep 9 22:43:01 2010
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -2622,7 +2622,7 @@
gw2388 MACH_GW2388 GW2388 2635
jadecpu MACH_JADECPU JADECPU 2636
carlisle MACH_CARLISLE CARLISLE 2637
-lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638
+lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
nemid_tb MACH_NEMID_TB NEMID_TB 2639
terrier MACH_TERRIER TERRIER 2640
turbot MACH_TURBOT TURBOT 2641
@@ -2950,3 +2950,97 @@
netviz MACH_NETVIZ NETVIZ 2964
flexibity MACH_FLEXIBITY FLEXIBITY 2965
wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
+lpc24xx MACH_LPC24XX LPC24XX 2967
+spica MACH_SPICA SPICA 2968
+gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
+bipnet MACH_BIPNET BIPNET 2970
+overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
+davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
+pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
+ptx7545 MACH_PTX7545 PTX7545 2974
+tm_efdc MACH_TM_EFDC TM_EFDC 2975
+omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
+flyer MACH_FLYER FLYER 2978
+tornado3240 MACH_TORNADO3240 TORNADO3240 2979
+soli_01 MACH_SOLI_01 SOLI_01 2980
+omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
+helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
+netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
+ssc MACH_SSC SSC 2984
+premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
+wasabi MACH_WASABI WASABI 2986
+vivow MACH_VIVOW VIVOW 2987
+mx50_rdp MACH_MX50_RDP MX50_RDP 2988
+universal MACH_UNIVERSAL UNIVERSAL 2989
+real6410 MACH_REAL6410 REAL6410 2990
+spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
+ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
+omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
+thebe MACH_THEBE THEBE 2994
+rv082 MACH_RV082 RV082 2995
+armlguest MACH_ARMLGUEST ARMLGUEST 2996
+tjinc1000 MACH_TJINC1000 TJINC1000 2997
+dockstar MACH_DOCKSTAR DOCKSTAR 2998
+ax8008 MACH_AX8008 AX8008 2999
+gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
+pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
+ea20 MACH_EA20 EA20 3002
+awm2 MACH_AWM2 AWM2 3003
+ti8148evm MACH_TI8148EVM TI8148EVM 3004
+tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005
+linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
+tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
+rubys MACH_RUBYS RUBYS 3008
+aquarius MACH_AQUARIUS AQUARIUS 3009
+mx53_ard MACH_MX53_ARD MX53_ARD 3010
+mx53_smd MACH_MX53_SMD MX53_SMD 3011
+lswxl MACH_LSWXL LSWXL 3012
+dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
+sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
+jocpu550 MACH_JOCPU550 JOCPU550 3015
+msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
+msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
+yanomami MACH_YANOMAMI YANOMAMI 3018
+gta04 MACH_GTA04 GTA04 3019
+cm_a510 MACH_CM_A510 CM_A510 3020
+omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
+kx33xx MACH_KX33XX KX33XX 3022
+ptx7510 MACH_PTX7510 PTX7510 3023
+top9000 MACH_TOP9000 TOP9000 3024
+teenote MACH_TEENOTE TEENOTE 3025
+ts3 MACH_TS3 TS3 3026
+a0 MACH_A0 A0 3027
+fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
+fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
+frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
+remus MACH_REMUS REMUS 3031
+at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
+at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
+kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
+oratisrouter MACH_ORATISROUTER ORATISROUTER 3035
+armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
+spdm MACH_SPDM SPDM 3037
+gtib MACH_GTIB GTIB 3038
+dgm3240 MACH_DGM3240 DGM3240 3039
+atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040
+htcmega MACH_HTCMEGA HTCMEGA 3041
+tricorder MACH_TRICORDER TRICORDER 3042
+tx28 MACH_TX28 TX28 3043
+bstbrd MACH_BSTBRD BSTBRD 3044
+pwb3090 MACH_PWB3090 PWB3090 3045
+idea6410 MACH_IDEA6410 IDEA6410 3046
+qbc9263 MACH_QBC9263 QBC9263 3047
+borabora MACH_BORABORA BORABORA 3048
+valdez MACH_VALDEZ VALDEZ 3049
+ls9g20 MACH_LS9G20 LS9G20 3050
+mios_v1 MACH_MIOS_V1 MIOS_V1 3051
+s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
+controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
+tin307 MACH_TIN307 TIN307 3054
+tin510 MACH_TIN510 TIN510 3055
+bluecheese MACH_BLUECHEESE BLUECHEESE 3057
+tem3x30 MACH_TEM3X30 TEM3X30 3058
+harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
+msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
+spear900 MACH_SPEAR900 SPEAR900 3061
+pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
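Each row in this table feeds the mach-types generator, which emits a MACH_TYPE_<NAME> constant and a machine_is_<name>() predicate keyed on the configured machine type. The exact generated shape is an assumption here, but for one of the new entries it looks roughly like:

    /* Sketch (assumption) of the generated mach-types.h for one new entry: */
    #define MACH_TYPE_DOCKSTAR 2998

    #ifdef CONFIG_MACH_DOCKSTAR
    # define machine_is_dockstar()  (machine_arch_type == MACH_TYPE_DOCKSTAR)
    #else
    # define machine_is_dockstar()  (0)
    #endif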
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 0974c0e..bab0129 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -121,6 +121,9 @@
struct user_context *user = current->thread.user;
unsigned long tbr, psr;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
tbr = user->i.tbr;
psr = user->i.psr;
if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
@@ -250,6 +253,8 @@
struct sigframe __user *frame;
int rsig;
+ set_fs(USER_DS);
+
frame = get_sigframe(ka, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -293,22 +298,23 @@
(unsigned long) (frame->retcode + 2));
}
- /* set up registers for signal handler */
- __frame->sp = (unsigned long) frame;
- __frame->lr = (unsigned long) &frame->retcode;
- __frame->gr8 = sig;
-
+ /* Set up registers for the signal handler */
if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
- __get_user(__frame->pc, &funcptr->text);
- __get_user(__frame->gr15, &funcptr->GOT);
+ struct fdpic_func_descriptor desc;
+ if (copy_from_user(&desc, funcptr, sizeof(desc)))
+ goto give_sigsegv;
+ __frame->pc = desc.text;
+ __frame->gr15 = desc.GOT;
} else {
__frame->pc = (unsigned long) ka->sa.sa_handler;
__frame->gr15 = 0;
}
- set_fs(USER_DS);
+ __frame->sp = (unsigned long) frame;
+ __frame->lr = (unsigned long) &frame->retcode;
+ __frame->gr8 = sig;
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
@@ -323,7 +329,7 @@
return 0;
give_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
return -EFAULT;
} /* end setup_frame() */
@@ -338,6 +344,8 @@
struct rt_sigframe __user *frame;
int rsig;
+ set_fs(USER_DS);
+
frame = get_sigframe(ka, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -392,22 +400,23 @@
}
/* Set up registers for signal handler */
- __frame->sp = (unsigned long) frame;
- __frame->lr = (unsigned long) &frame->retcode;
- __frame->gr8 = sig;
- __frame->gr9 = (unsigned long) &frame->info;
-
if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
- __get_user(__frame->pc, &funcptr->text);
- __get_user(__frame->gr15, &funcptr->GOT);
+ struct fdpic_func_descriptor desc;
+ if (copy_from_user(&desc, funcptr, sizeof(desc)))
+ goto give_sigsegv;
+ __frame->pc = desc.text;
+ __frame->gr15 = desc.GOT;
} else {
__frame->pc = (unsigned long) ka->sa.sa_handler;
__frame->gr15 = 0;
}
- set_fs(USER_DS);
+ __frame->sp = (unsigned long) frame;
+ __frame->lr = (unsigned long) &frame->retcode;
+ __frame->gr8 = sig;
+ __frame->gr9 = (unsigned long) &frame->info;
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
@@ -422,7 +431,7 @@
return 0;
give_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
return -EFAULT;
} /* end setup_rt_frame() */
@@ -437,7 +446,7 @@
int ret;
/* Are we from a system call? */
- if (in_syscall(__frame)) {
+ if (__frame->syscallno != -1) {
/* If so, check system call restarting.. */
switch (__frame->gr8) {
case -ERESTART_RESTARTBLOCK:
@@ -456,6 +465,7 @@
__frame->gr8 = __frame->orig_gr8;
__frame->pc -= 4;
}
+ __frame->syscallno = -1;
}
/* Set up the stack frame */
@@ -538,10 +548,11 @@
break;
case -ERESTART_RESTARTBLOCK:
- __frame->gr8 = __NR_restart_syscall;
+ __frame->gr7 = __NR_restart_syscall;
__frame->pc -= 4;
break;
}
+ __frame->syscallno = -1;
}
/* if there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
index f90edc8..9301a28 100644
--- a/arch/ia64/include/asm/compat.h
+++ b/arch/ia64/include/asm/compat.h
@@ -199,7 +199,7 @@
}
static __inline__ void __user *
-compat_alloc_user_space (long len)
+arch_compat_alloc_user_space (long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
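This rename (repeated below for mips, parisc, powerpc, s390, sparc and tile) turns each per-arch routine into a backend hook. The presumed motivation is that a generic compat_alloc_user_space() wrapper can then validate the region the arch hands back; the wrapper below is a sketch of that idea and an assumption, not a quote of the actual include/linux/compat.h change:

    /* Sketch (assumption): generic wrapper layered on the per-arch hook. */
    static inline void __user *compat_alloc_user_space(unsigned long len)
    {
            void __user *ptr = arch_compat_alloc_user_space(len);

            if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
                    return NULL;
            return ptr;
    }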
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 3567d54..331d42b 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -420,22 +420,31 @@
;;
RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
- mov ar.ccv=0
andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
+ mov r8=EINVAL // default to EINVAL
#ifdef CONFIG_SMP
- mov r17=1
+ // __ticket_spin_trylock(r31)
+ ld4 r17=[r31]
;;
- cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock
- mov r8=EINVAL // default to EINVAL
+ mov.m ar.ccv=r17
+ extr.u r9=r17,17,15
+ adds r19=1,r17
+ extr.u r18=r17,0,15
;;
+ cmp.eq p6,p7=r9,r18
+ ;;
+(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv
+(p6) dep.z r20=r19,1,15 // next serving ticket for unlock
+(p7) br.cond.spnt.many .lock_contention
+ ;;
+ cmp4.eq p0,p7=r9,r17
+ adds r31=2,r31
+(p7) br.cond.spnt.many .lock_contention
ld8 r3=[r2] // re-read current->blocked now that we hold the lock
- cmp4.ne p6,p0=r18,r0
-(p6) br.cond.spnt.many .lock_contention
;;
#else
ld8 r3=[r2] // re-read current->blocked now that we hold the lock
- mov r8=EINVAL // default to EINVAL
#endif
add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -490,7 +499,9 @@
(p6) br.cond.spnt.few 1b // yes -> retry
#ifdef CONFIG_SMP
- st4.rel [r31]=r0 // release the lock
+ // __ticket_spin_unlock(r31)
+ st2.rel [r31]=r20
+ mov r20=0 // I must not leak kernel bits...
#endif
SSM_PSR_I(p0, p9, r31)
;;
@@ -512,7 +523,8 @@
.sig_pending:
#ifdef CONFIG_SMP
- st4.rel [r31]=r0 // release the lock
+ // __ticket_spin_unlock(r31)
+ st2.rel [r31]=r20 // release the lock
#endif
SSM_PSR_I(p0, p9, r17)
;;
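The assembly above replaces a simple cmpxchg-based lock acquire with the ticket-lock protocol: judging by the extr.u operands, the low 15 bits of the lock word hold the next ticket to hand out, bits 17 and up hold the ticket now being served, and unlock bumps the serving half with a 2-byte store. A rough, standalone C rendering of the trylock part (the field layout is taken from the code above; everything else is illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* low 15 bits: next ticket to give out; bits 17..31: ticket being served */
    static bool ticket_trylock(_Atomic uint32_t *lock)
    {
            uint32_t old = atomic_load(lock);
            uint32_t ticket = old & 0x7fff;
            uint32_t serve  = (old >> 17) & 0x7fff;

            if (ticket != serve)
                    return false;           /* lock is held by someone else */
            /* grab the next ticket; success means we now own the lock */
            return atomic_compare_exchange_strong(lock, &old, old + 1);
    }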
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
index 9c1acb2..b2eeb0d 100644
--- a/arch/m32r/include/asm/signal.h
+++ b/arch/m32r/include/asm/signal.h
@@ -157,7 +157,6 @@
#undef __HAVE_ARCH_SIG_BITOPS
struct pt_regs;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 7612577..c705456 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -351,6 +351,7 @@
#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __IGNORE_lchown
#define __IGNORE_setuid
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 4038698..225412b 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -235,10 +235,9 @@
work_notifysig: ; deal with pending signals and
; notify-resume requests
mv r0, sp ; arg1 : struct pt_regs *regs
- ldi r1, #0 ; arg2 : sigset_t *oldset
- mv r2, r9 ; arg3 : __u32 thread_info_flags
+ mv r1, r9 ; arg2 : __u32 thread_info_flags
bl do_notify_resume
- bra restore_all
+ bra resume_userspace
; perform syscall exit tracing
ALIGN
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index e555091..0021ade 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -592,16 +592,17 @@
if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
!= sizeof(insn))
- break;
+ return -EIO;
compute_next_pc(insn, pc, &next_pc, child);
if (next_pc & 0x80000000)
- break;
+ return -EIO;
if (embed_debug_trap(child, next_pc))
- break;
+ return -EIO;
invalidate_cache();
+ return 0;
}
void user_disable_single_step(struct task_struct *child)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 144b0f12..7bbe386 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -28,37 +28,6 @@
#define DEBUG_SIG 0
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
- unsigned long r2, unsigned long r3, unsigned long r4,
- unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
- sigset_t newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@
return (void __user *)((sp - frame_size) & -8ul);
}
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@
current->comm, current->pid, frame, regs->pc);
#endif
- return;
+ return 0;
give_sigsegv:
force_sigsegv(sig, current);
+ return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+ u16 inst;
+ if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
+ return -EFAULT;
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
+ regs->syscall_nr = -1;
+ return 0;
}
/*
* OK, we're invoking a handler
*/
-static void
+static int
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
- unsigned short inst;
-
/* Are we from a system call? */
if (regs->syscall_nr >= 0) {
/* If so, check system call restarting.. */
@@ -308,16 +289,14 @@
/* fallthrough */
case -ERESTARTNOINTR:
regs->r0 = regs->orig_r0;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
+ if (prev_insn(regs) < 0)
+ return -EFAULT;
}
}
/* Set up the stack frame */
- setup_rt_frame(sig, ka, info, oldset, regs);
+ if (setup_rt_frame(sig, ka, info, oldset, regs))
+ return -EFAULT;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+ return 0;
}
/*
@@ -332,12 +312,12 @@
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
- unsigned short inst;
+ sigset_t *oldset;
/*
* We want the common case to go fast, which
@@ -346,12 +326,14 @@
* if so.
*/
if (!user_mode(regs))
- return 1;
+ return;
if (try_to_freeze())
goto no_signal;
- if (!oldset)
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@
*/
/* Whee! Actually deliver the signal. */
- handle_signal(signr, &ka, &info, oldset, regs);
- return 1;
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ return;
}
no_signal:
@@ -375,31 +359,24 @@
regs->r0 == -ERESTARTSYS ||
regs->r0 == -ERESTARTNOINTR) {
regs->r0 = regs->orig_r0;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
- }
- if (regs->r0 == -ERESTART_RESTARTBLOCK){
+ prev_insn(regs);
+ } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
regs->r0 = regs->orig_r0;
regs->r7 = __NR_restart_syscall;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
+ prev_insn(regs);
}
}
- return 0;
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
}
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
- __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs,oldset);
+ do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 60b15d0..b43b36b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -340,10 +340,13 @@
#define __NR_set_thread_area 334
#define __NR_atomic_cmpxchg_32 335
#define __NR_atomic_barrier 336
+#define __NR_fanotify_init 337
+#define __NR_fanotify_mark 338
+#define __NR_prlimit64 339
#ifdef __KERNEL__
-#define NR_syscalls 337
+#define NR_syscalls 340
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 2391bdf..6360c43 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -765,4 +765,7 @@
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index b30b3eb..79b1ed1 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -355,6 +355,9 @@
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 613f691..dbc5106 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -145,7 +145,7 @@
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = (struct pt_regs *)
((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 444b9f9..7c2a2f7 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,7 +8,6 @@
config MN10300
def_bool y
select HAVE_OPROFILE
- select HAVE_ARCH_TRACEHOOK
config AM33
def_bool y
diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug
index ff80e86..ce83c74 100644
--- a/arch/mn10300/Kconfig.debug
+++ b/arch/mn10300/Kconfig.debug
@@ -101,7 +101,7 @@
choice
prompt "GDB stub port"
- default GDBSTUB_TTYSM0
+ default GDBSTUB_ON_TTYSM0
depends on GDBSTUB
help
Select the serial port used for GDB-stub.
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index f49ac49..3f50e96 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -229,9 +229,9 @@
#include <asm-generic/bitops/hweight.h>
#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr) ^ 0x18, (addr))
+ test_and_set_bit((nr), (addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr) ^ 0x18, (addr))
+ test_and_clear_bit((nr), (addr))
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/minix-le.h>
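The dropped "^ 0x18" was a byte swizzle: XOR-ing the bit number with 24 flips which byte of a 32-bit word a bit lands in, converting between little-endian bitmap numbering and a big-endian word layout. The change above says that remapping is no longer wanted on mn10300. A tiny standalone illustration of what the old expression did, purely explanatory:

    #include <stdio.h>

    int main(void)
    {
            /* nr ^ 0x18 swaps the byte a bit lands in within a 32-bit word:
             * bits 0..7 <-> 24..31, bits 8..15 <-> 16..23. */
            for (unsigned int nr = 0; nr < 32; nr += 8)
                    printf("bit %2u -> bit %2u\n", nr, nr ^ 0x18);
            return 0;
    }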
diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
index 7e891fc..1865d72 100644
--- a/arch/mn10300/include/asm/signal.h
+++ b/arch/mn10300/include/asm/signal.h
@@ -78,7 +78,7 @@
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
-#define SIGRTMAX (_NSIG-1)
+#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 9d49073..db509dd 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -156,17 +156,17 @@
._intr = &SC0ICR,
._rxb = &SC0RXB,
._txb = &SC0TXB,
- .rx_name = "ttySM0/Rx",
- .tx_name = "ttySM0/Tx",
+ .rx_name = "ttySM0:Rx",
+ .tx_name = "ttySM0:Tx",
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
- .tm_name = "ttySM0/Timer8",
+ .tm_name = "ttySM0:Timer8",
._tmxmd = &TM8MD,
._tmxbr = &TM8BR,
._tmicr = &TM8ICR,
.tm_irq = TM8IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
- .tm_name = "ttySM0/Timer2",
+ .tm_name = "ttySM0:Timer2",
._tmxmd = &TM2MD,
._tmxbr = (volatile u16 *) &TM2BR,
._tmicr = &TM2ICR,
@@ -209,17 +209,17 @@
._intr = &SC1ICR,
._rxb = &SC1RXB,
._txb = &SC1TXB,
- .rx_name = "ttySM1/Rx",
- .tx_name = "ttySM1/Tx",
+ .rx_name = "ttySM1:Rx",
+ .tx_name = "ttySM1:Tx",
#ifdef CONFIG_MN10300_TTYSM1_TIMER9
- .tm_name = "ttySM1/Timer9",
+ .tm_name = "ttySM1:Timer9",
._tmxmd = &TM9MD,
._tmxbr = &TM9BR,
._tmicr = &TM9ICR,
.tm_irq = TM9IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
- .tm_name = "ttySM1/Timer3",
+ .tm_name = "ttySM1:Timer3",
._tmxmd = &TM3MD,
._tmxbr = (volatile u16 *) &TM3BR,
._tmicr = &TM3ICR,
@@ -260,9 +260,9 @@
.uart.lock =
__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
.name = "ttySM2",
- .rx_name = "ttySM2/Rx",
- .tx_name = "ttySM2/Tx",
- .tm_name = "ttySM2/Timer10",
+ .rx_name = "ttySM2:Rx",
+ .tx_name = "ttySM2:Tx",
+ .tm_name = "ttySM2:Timer10",
._iobase = &SC2CTR,
._control = &SC2CTR,
._status = &SC2STR,
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 717db14..d4de05a 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -65,10 +65,10 @@
old_sigset_t mask;
if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
@@ -77,10 +77,10 @@
if (!ret && oact) {
if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@@ -102,6 +102,9 @@
{
unsigned int err = 0;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
if (is_using_fpu(current))
fpu_kill_state(current);
@@ -330,8 +333,6 @@
regs->d0 = sig;
regs->d1 = (unsigned long) &frame->sc;
- set_fs(USER_DS);
-
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@
return 0;
give_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
return -EFAULT;
}
@@ -413,8 +414,6 @@
regs->d0 = sig;
regs->d1 = (long) &frame->info;
- set_fs(USER_DS);
-
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@
return 0;
give_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
return -EFAULT;
}
+static inline void stepback(struct pt_regs *regs)
+{
+ regs->pc -= 2;
+ regs->orig_d0 = -1;
+}
+
/*
* handle the actual delivery of a signal to userspace
*/
@@ -459,7 +464,7 @@
/* fallthrough */
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
- regs->pc -= 2;
+ stepback(regs);
}
}
@@ -527,12 +532,12 @@
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
- regs->pc -= 2;
+ stepback(regs);
break;
case -ERESTART_RESTARTBLOCK:
regs->d0 = __NR_restart_syscall;
- regs->pc -= 2;
+ stepback(regs);
break;
}
}
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 28b9d98..1557277 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,13 +2,11 @@
# Makefile for the MN10300-specific memory management code
#
+cacheflush-y := cache.o cache-mn10300.o
+cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+
+cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
+
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
- misalignment.o dma-alloc.o
-
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
-obj-y += cache.o cache-mn10300.o
-ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
-obj-y += cache-flush-mn10300.o
-endif
-endif
+ misalignment.o dma-alloc.o $(cacheflush-y)
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c
new file mode 100644
index 0000000..f669ea4
--- /dev/null
+++ b/arch/mn10300/mm/cache-disabled.c
@@ -0,0 +1,21 @@
+/* Handle the cache being disabled
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+
+/*
+ * allow userspace to flush the instruction cache
+ */
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+{
+ if (end < start)
+ return -EINVAL;
+ return 0;
+}
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 02b77ba..efa0b60c 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -147,7 +147,7 @@
return (u32)(unsigned long)uptr;
}
-static __inline__ void __user *compat_alloc_user_space(long len)
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = &current->thread.regs;
return (void __user *)regs->gr[30];
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 396d21a..a11d4ea 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -134,7 +134,7 @@
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = current->thread.regs;
unsigned long usp = regs->gpr[1];
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
index a67aeed..debc5ed 100644
--- a/arch/powerpc/include/asm/fsldma.h
+++ b/arch/powerpc/include/asm/fsldma.h
@@ -11,6 +11,7 @@
#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
#define __ARCH_POWERPC_ASM_FSLDMA_H__
+#include <linux/slab.h>
#include <linux/dmaengine.h>
/*
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 7109f5b..2300426 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -138,6 +138,7 @@
ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
+ regs->trap = 0;
return 0; /* no signals delivered */
}
@@ -164,6 +165,7 @@
ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
}
+ regs->trap = 0;
if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 2666101..b96a3a0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -511,6 +511,7 @@
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
err = restore_general_regs(regs, sr);
+ regs->trap = 0;
err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
@@ -884,7 +885,6 @@
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
- regs->trap = 0;
return 1;
badframe:
@@ -1228,7 +1228,6 @@
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
- regs->trap = 0;
return 1;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2fe6fc6..27c4a45 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -178,7 +178,7 @@
err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
/* skip SOFTE */
- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
+ regs->trap = 0;
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 104f200..a875c2f 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -181,7 +181,7 @@
#endif
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 5016f76..6f57325 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -167,7 +167,7 @@
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 357ced3..6318e62 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1038,6 +1038,7 @@
if (atomic_read(&nmi_active) < 0)
return -ENODEV;
+ pmap = NULL;
if (attr->type == PERF_TYPE_HARDWARE) {
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
@@ -1046,9 +1047,18 @@
pmap = sparc_map_cache_event(attr->config);
if (IS_ERR(pmap))
return PTR_ERR(pmap);
- } else
+ } else if (attr->type != PERF_TYPE_RAW)
return -EOPNOTSUPP;
+ if (pmap) {
+ hwc->event_base = perf_event_encode(pmap);
+ } else {
+ /* User gives us "(encoding << 16) | pic_mask" for
+ * PERF_TYPE_RAW events.
+ */
+ hwc->event_base = attr->config;
+ }
+
/* We save the enable bits in the config_base. */
hwc->config_base = sparc_pmu->irq_bit;
if (!attr->exclude_user)
@@ -1058,8 +1068,6 @@
if (!attr->exclude_hv)
hwc->config_base |= sparc_pmu->hv_bit;
- hwc->event_base = perf_event_encode(pmap);
-
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
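With the change above, raw events pass attr->config straight into hwc->event_base using the "(encoding << 16) | pic_mask" convention spelled out in the new comment. From userspace that would look roughly like the sketch below; the encoding and mask values are invented placeholders, not real SPARC PCR encodings:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int open_raw_counter(void)
    {
            struct perf_event_attr attr;
            unsigned int encoding = 0x2b;   /* placeholder event encoding */
            unsigned int pic_mask = 0x3;    /* placeholder PIC mask       */

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_RAW;
            attr.config = (encoding << 16) | pic_mask;

            /* pid = 0 (self), cpu = -1 (any), no group, no flags */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }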
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ea22cd3..75fad42 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -453,8 +453,66 @@
return err;
}
-static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+/* The I-cache flush instruction only works in the primary ASI, which
+ * right now is the nucleus, aka. kernel space.
+ *
+ * Therefore we have to kick the instructions out using the kernel
+ * side linear mapping of the physical address backing the user
+ * instructions.
+ */
+static void flush_signal_insns(unsigned long address)
+{
+ unsigned long pstate, paddr;
+ pte_t *ptep, pte;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+
+ /* Commit all stores of the instructions we are about to flush. */
+ wmb();
+
+ /* Disable cross-call reception. In this way even a very wide
+ * munmap() on another cpu can't tear down the page table
+ * hierarchy from underneath us, since that can't complete
+ * until the IPI tlb flush returns.
+ */
+
+ __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+ __asm__ __volatile__("wrpr %0, %1, %%pstate"
+ : : "r" (pstate), "i" (PSTATE_IE));
+
+ pgdp = pgd_offset(current->mm, address);
+ if (pgd_none(*pgdp))
+ goto out_irqs_on;
+ pudp = pud_offset(pgdp, address);
+ if (pud_none(*pudp))
+ goto out_irqs_on;
+ pmdp = pmd_offset(pudp, address);
+ if (pmd_none(*pmdp))
+ goto out_irqs_on;
+
+ ptep = pte_offset_map(pmdp, address);
+ pte = *ptep;
+ if (!pte_present(pte))
+ goto out_unmap;
+
+ paddr = (unsigned long) page_address(pte_page(pte));
+
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (paddr),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
+
+out_unmap:
+ pte_unmap(ptep);
+out_irqs_on:
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+
+}
+
+static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct signal_frame32 __user *sf;
int sigframe_size;
@@ -547,13 +605,7 @@
if (ka->ka_restorer) {
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
} else {
- /* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
- pgd_t *pgdp = pgd_offset(current->mm, address);
- pud_t *pudp = pud_offset(pgdp, address);
- pmd_t *pmdp = pmd_offset(pudp, address);
- pte_t *ptep;
- pte_t pte;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -562,34 +614,22 @@
if (err)
goto sigsegv;
- preempt_disable();
- ptep = pte_offset_map(pmdp, address);
- pte = *ptep;
- if (pte_present(pte)) {
- unsigned long page = (unsigned long)
- page_address(pte_page(pte));
-
- wmb();
- __asm__ __volatile__("flush %0 + %1"
- : /* no outputs */
- : "r" (page),
- "r" (address & (PAGE_SIZE - 1))
- : "memory");
- }
- pte_unmap(ptep);
- preempt_enable();
+ flush_signal_insns(address);
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- unsigned long signr, sigset_t *oldset,
- siginfo_t *info)
+static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ unsigned long signr, sigset_t *oldset,
+ siginfo_t *info)
{
struct rt_signal_frame32 __user *sf;
int sigframe_size;
@@ -687,12 +727,7 @@
if (ka->ka_restorer)
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
else {
- /* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
- pgd_t *pgdp = pgd_offset(current->mm, address);
- pud_t *pudp = pud_offset(pgdp, address);
- pmd_t *pmdp = pmd_offset(pudp, address);
- pte_t *ptep;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -704,38 +739,32 @@
if (err)
goto sigsegv;
- preempt_disable();
- ptep = pte_offset_map(pmdp, address);
- if (pte_present(*ptep)) {
- unsigned long page = (unsigned long)
- page_address(pte_page(*ptep));
-
- wmb();
- __asm__ __volatile__("flush %0 + %1"
- : /* no outputs */
- : "r" (page),
- "r" (address & (PAGE_SIZE - 1))
- : "memory");
- }
- pte_unmap(ptep);
- preempt_enable();
+ flush_signal_insns(address);
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signr, current);
+ return -EFAULT;
}
-static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
- siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
+ siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
{
+ int err;
+
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame32(ka, regs, signr, oldset, info);
+ err = setup_rt_frame32(ka, regs, signr, oldset, info);
else
- setup_frame32(ka, regs, signr, oldset);
+ err = setup_frame32(ka, regs, signr, oldset);
+
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -743,6 +772,10 @@
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -789,16 +822,14 @@
if (signr > 0) {
if (restart_syscall)
syscall_restart32(orig_i0, regs, &ka.sa);
- handle_signal32(signr, &ka, &info, oldset, regs);
-
- /* A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
+ /* A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ }
return;
}
if (restart_syscall &&
@@ -809,12 +840,14 @@
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 9882df9..5e5c5fd 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -315,8 +315,8 @@
return err;
}
-static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct signal_frame __user *sf;
int sigframe_size, err;
@@ -384,16 +384,19 @@
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
- return;
+ return 0;
sigill_and_return:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset, siginfo_t *info)
+static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset, siginfo_t *info)
{
struct rt_signal_frame __user *sf;
int sigframe_size;
@@ -466,22 +469,30 @@
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static inline void
+static inline int
handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
+ int err;
+
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(ka, regs, signr, oldset, info);
+ err = setup_rt_frame(ka, regs, signr, oldset, info);
else
- setup_frame(ka, regs, signr, oldset);
+ err = setup_frame(ka, regs, signr, oldset);
+
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -489,6 +500,10 @@
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -546,17 +561,15 @@
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
- handle_signal(signr, &ka, &info, oldset, regs);
-
- /* a signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
return;
}
if (restart_syscall &&
@@ -567,12 +580,14 @@
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
regs->npc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
regs->npc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* if there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 9fa48c3..006fe45 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -409,7 +409,7 @@
return (void __user *) sp;
}
-static inline void
+static inline int
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
@@ -483,26 +483,37 @@
}
/* 4. return to kernel instructions */
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
- siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
+ siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
{
- setup_rt_frame(ka, regs, signr, oldset,
- (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+ int err;
+
+ err = setup_rt_frame(ka, regs, signr, oldset,
+ (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -571,16 +582,14 @@
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
- handle_signal(signr, &ka, &info, oldset, regs);
-
- /* A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+ /* A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ }
return;
}
if (restart_syscall &&
@@ -591,12 +600,14 @@
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 5079413..675c9e1 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -166,7 +166,6 @@
{
siginfo_t info;
- lock_kernel();
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
@@ -180,7 +179,6 @@
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
- unlock_kernel();
}
asmlinkage int
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index f8514e2..12b9f35 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -323,7 +323,6 @@
{
enum direction dir;
- lock_kernel();
if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
(((insn >> 30) & 3) != 3))
goto kill_user;
@@ -377,5 +376,5 @@
kill_user:
user_mna_trap_fault(regs, insn);
out:
- unlock_kernel();
+ ;
}
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index f24d298..b351770 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -112,7 +112,6 @@
struct thread_info *tp = current_thread_info();
int window;
- lock_kernel();
flush_user_windows();
for(window = 0; window < tp->w_saved; window++) {
unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@
do_exit(SIGILL);
}
tp->w_saved = 0;
- unlock_kernel();
}
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h
index 1246573..261aaba 100644
--- a/arch/tile/include/arch/chip_tile64.h
+++ b/arch/tile/include/arch/chip_tile64.h
@@ -150,6 +150,9 @@
/** Is the PROC_STATUS SPR supported? */
#define CHIP_HAS_PROC_STATUS_SPR() 0
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
/** Log of the number of mshims we have. */
#define CHIP_LOG_NUM_MSHIMS() 2
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h
index e864c47..7001769 100644
--- a/arch/tile/include/arch/chip_tilepro.h
+++ b/arch/tile/include/arch/chip_tilepro.h
@@ -150,6 +150,9 @@
/** Is the PROC_STATUS SPR supported? */
#define CHIP_HAS_PROC_STATUS_SPR() 1
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
/** Log of the number of mshims we have. */
#define CHIP_LOG_NUM_MSHIMS() 2
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 5a34da6..8b60ec8 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -195,7 +195,7 @@
return (long)(int)(long __force)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
@@ -214,8 +214,9 @@
struct compat_sigaction;
struct compat_siginfo;
struct compat_sigaltstack;
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
- compat_uptr_t __user *envp);
+long compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
struct compat_sigaction __user *oact,
size_t sigsetsize);
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 8c95bef..ee43328 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -164,22 +164,22 @@
#define iowrite32 writel
#define iowrite64 writeq
-static inline void *memcpy_fromio(void *dst, void *src, int len)
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
+ size_t len)
{
int x;
BUG_ON((unsigned long)src & 0x3);
for (x = 0; x < len; x += 4)
*(u32 *)(dst + x) = readl(src + x);
- return dst;
}
-static inline void *memcpy_toio(void *dst, void *src, int len)
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
+ size_t len)
{
int x;
BUG_ON((unsigned long)dst & 0x3);
for (x = 0; x < len; x += 4)
writel(*(u32 *)(src + x), dst + x);
- return dst;
}
/*
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index d942d09..ccd5f84 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -103,6 +103,18 @@
/* Any other miscellaneous processor state bits */
unsigned long proc_status;
#endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+ /* Interrupt base for PL0 interrupts */
+ unsigned long interrupt_vector_base;
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+ /* Tile cache retry fifo high-water mark */
+ unsigned long tile_rtf_hwm;
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+ /* Data stream prefetch control */
+ unsigned long dstream_pf;
+#endif
#ifdef CONFIG_HARDWALL
/* Is this task tied to an activated hardwall? */
struct hardwall_info *hardwall;
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index acdae81..4a02bb0 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -51,10 +51,7 @@
/*
* This struct defines the way the registers are stored on the stack during a
- * system call/exception. It should be a multiple of 8 bytes to preserve
- * normal stack alignment rules.
- *
- * Must track <sys/ucontext.h> and <sys/procfs.h>
+ * system call or exception. "struct sigcontext" has the same shape.
*/
struct pt_regs {
/* Saved main processor registers; 56..63 are special. */
@@ -80,11 +77,6 @@
#endif /* __ASSEMBLY__ */
-/* Flag bits in pt_regs.flags */
-#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
-#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
-#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
-
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
@@ -101,6 +93,11 @@
#ifdef __KERNEL__
+/* Flag bits in pt_regs.flags */
+#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
+#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
+#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
+
#ifndef __ASSEMBLY__
#define instruction_pointer(regs) ((regs)->pc)
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h
index 7cd7672..5e2d033 100644
--- a/arch/tile/include/asm/sigcontext.h
+++ b/arch/tile/include/asm/sigcontext.h
@@ -15,13 +15,21 @@
#ifndef _ASM_TILE_SIGCONTEXT_H
#define _ASM_TILE_SIGCONTEXT_H
-/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
-#include <asm/ptrace.h>
+#include <arch/abi.h>
-/* Must track <sys/ucontext.h> */
-
+/*
+ * struct sigcontext has the same shape as struct pt_regs,
+ * but is simplified since we know the fault is from userspace.
+ */
struct sigcontext {
- struct pt_regs regs;
+ uint_reg_t gregs[53]; /* General-purpose registers. */
+ uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
+ uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
+ uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
+ uint_reg_t pc; /* Program counter. */
+ uint_reg_t ics; /* In Interrupt Critical Section? */
+ uint_reg_t faultnum; /* Fault number. */
+ uint_reg_t pad[5];
};
#endif /* _ASM_TILE_SIGCONTEXT_H */
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
index eb0253f..c1ee1d6 100644
--- a/arch/tile/include/asm/signal.h
+++ b/arch/tile/include/asm/signal.h
@@ -24,6 +24,7 @@
#include <asm-generic/signal.h>
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+struct pt_regs;
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
void do_signal(struct pt_regs *regs);
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index af165a7..ce99ffe 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -62,10 +62,12 @@
long _sys_fork(struct pt_regs *regs);
long sys_vfork(void);
long _sys_vfork(struct pt_regs *regs);
-long sys_execve(char __user *filename, char __user * __user *argv,
- char __user * __user *envp);
-long _sys_execve(char __user *filename, char __user * __user *argv,
- char __user * __user *envp, struct pt_regs *regs);
+long sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp);
+long _sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp, struct pt_regs *regs);
/* kernel/signal.c */
long sys_sigaltstack(const stack_t __user *, stack_t __user *);
@@ -86,10 +88,13 @@
#endif
#ifdef CONFIG_COMPAT
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
- compat_uptr_t __user *envp);
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
- compat_uptr_t __user *envp, struct pt_regs *regs);
+long compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
+long _compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp,
+ struct pt_regs *regs);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
struct compat_sigaltstack __user *uoss_ptr);
long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 84f296c..8f58bdf 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1506,13 +1506,6 @@
}
STD_ENDPROC(handle_ill)
- .pushsection .rodata, "a"
- .align 8
-bpt_code:
- bpt
- ENDPROC(bpt_code)
- .popsection
-
/* Various stub interrupt handlers and syscall handlers */
STD_ENTRY_LOCAL(_kernel_double_fault)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 985cc28c..84c2911 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -408,6 +408,15 @@
#if CHIP_HAS_PROC_STATUS_SPR()
t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+ t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+ t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+ t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
+#endif
}
static void restore_arch_state(const struct thread_struct *t)
@@ -428,14 +437,14 @@
#if CHIP_HAS_PROC_STATUS_SPR()
__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+ __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
+#endif
#if CHIP_HAS_TILE_RTF_HWM()
- /*
- * Clear this whenever we switch back to a process in case
- * the previous process was monkeying with it. Even if enabled
- * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
- * performance hint, so isn't worth a full save/restore.
- */
- __insn_mtspr(SPR_TILE_RTF_HWM, 0);
+ __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+ __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}
@@ -561,8 +570,9 @@
}
#ifdef CONFIG_COMPAT
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
- compat_uptr_t __user *envp, struct pt_regs *regs)
+long _compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp, struct pt_regs *regs)
{
long error;
char *filename;
@@ -657,7 +667,7 @@
regs->regs[51], regs->regs[52], regs->tp);
pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
- for (i = 0; i < 52; i += 3)
+ for (i = 0; i < 52; i += 4)
pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
i, regs->regs[i], i+1, regs->regs[i+1],
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 45b66a3..ce183aa 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -61,13 +61,19 @@
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ /*
+ * Enforce that sigcontext is like pt_regs, and doesn't mess
+ * up our stack alignment rules.
+ */
+ BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
+ BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
+
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __get_user(((long *)regs)[i],
- &((long __user *)(&sc->regs))[i]);
+ err |= __get_user(regs->regs[i], &sc->gregs[i]);
regs->faultnum = INT_SWINT_1_SIGRETURN;
- err |= __get_user(*pr0, &sc->regs.regs[0]);
+ err |= __get_user(*pr0, &sc->gregs[0]);
return err;
}
@@ -112,8 +118,7 @@
int i, err = 0;
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __put_user(((long *)regs)[i],
- &((long __user *)(&sc->regs))[i]);
+ err |= __put_user(regs->regs[i], &sc->gregs[i]);
return err;
}
@@ -203,19 +208,17 @@
* Set up registers for signal handler.
* Registers that we don't modify keep the value they had from
* user-space at the time we took the signal.
+ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+ * since some things rely on this (e.g. glibc's debug/segfault.c).
*/
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
regs->sp = (unsigned long) frame;
regs->lr = restorer;
regs->regs[0] = (unsigned long) usig;
-
- if (ka->sa.sa_flags & SA_SIGINFO) {
- /* Need extra arguments, so mark to restore caller-saves. */
- regs->regs[1] = (unsigned long) &frame->info;
- regs->regs[2] = (unsigned long) &frame->uc;
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- }
+ regs->regs[1] = (unsigned long) &frame->info;
+ regs->regs[2] = (unsigned long) &frame->uc;
+ regs->flags |= PT_FLAGS_CALLER_SAVES;
/*
* Notify any tracer that was single-stepping it.
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 38a68b0b4..ea2e0ce 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -175,7 +175,7 @@
pr_err(" <received signal %d>\n",
frame->info.si_signo);
}
- return &frame->uc.uc_mcontext.regs;
+ return (struct pt_regs *)&frame->uc.uc_mcontext;
}
return NULL;
}
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index cd145ed..49b5e1e 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -62,7 +62,7 @@
return error;
}
-long um_execve(const char *file, char __user *__user *argv, char __user *__user *env)
+long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
{
long err;
@@ -72,8 +72,8 @@
return err;
}
-long sys_execve(const char __user *file, char __user *__user *argv,
- char __user *__user *env)
+long sys_execve(const char __user *file, const char __user *const __user *argv,
+ const char __user *const __user *env)
{
long error;
char *filename;
diff --git a/arch/um/kernel/internal.h b/arch/um/kernel/internal.h
index 1303a10..5bf97db 100644
--- a/arch/um/kernel/internal.h
+++ b/arch/um/kernel/internal.h
@@ -1 +1 @@
-extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env);
+extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index 5ddb246..f958cb8 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -60,8 +60,8 @@
fs = get_fs();
set_fs(KERNEL_DS);
- ret = um_execve(filename, (char __user *__user *)argv,
- (char __user *__user *) envp);
+ ret = um_execve(filename, (const char __user *const __user *)argv,
+ (const char __user *const __user *) envp);
set_fs(fs);
return ret;
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 8aa1b59..e8c8881 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -74,7 +74,7 @@
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
- ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
+ ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
stackp-y := -fstack-protector
KBUILD_CFLAGS += $(stackp-y)
else
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c
index 030f4b9..5df2869 100644
--- a/arch/x86/boot/early_serial_console.c
+++ b/arch/x86/boot/early_serial_console.c
@@ -58,7 +58,19 @@
if (arg[pos] == ',')
pos++;
- if (!strncmp(arg, "ttyS", 4)) {
+ /*
+ * make sure we have
+ * "serial,0x3f8,115200"
+ * "serial,ttyS0,115200"
+ * "ttyS0,115200"
+ */
+ if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
+ port = simple_strtoull(arg + pos, &e, 16);
+ if (port == 0 || arg + pos == e)
+ port = DEFAULT_SERIAL_PORT;
+ else
+ pos = e - arg;
+ } else if (!strncmp(arg + pos, "ttyS", 4)) {
static const int bases[] = { 0x3f8, 0x2f8 };
int idx = 0;
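The parsing above accepts either a raw hex I/O port or a ttyS index after the optional "serial," prefix. A rough user-space sketch of the same decision (hypothetical helper, not the boot code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_SERIAL_PORT 0x3f8

static unsigned int parse_port(const char *arg)
{
	static const unsigned int bases[] = { 0x3f8, 0x2f8 };
	char *end;

	if (!strncmp(arg, "0x", 2)) {
		unsigned long port = strtoul(arg, &end, 16);
		return (port == 0 || end == arg) ? DEFAULT_SERIAL_PORT : port;
	}
	if (!strncmp(arg, "ttyS", 4)) {
		int idx = atoi(arg + 4);
		return (idx >= 0 && idx < 2) ? bases[idx] : DEFAULT_SERIAL_PORT;
	}
	return DEFAULT_SERIAL_PORT;
}

int main(void)
{
	/* "0x2f8,115200" -> 0x2f8, "ttyS1,115200" -> 0x2f8 */
	printf("0x%x 0x%x\n", parse_port("0x2f8,115200"), parse_port("ttyS1,115200"));
	return 0;
}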
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index b86feab..518bb99 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -50,7 +50,12 @@
/*
* Reload arg registers from stack in case ptrace changed them.
* We don't reload %eax because syscall_trace_enter() returned
- * the value it wants us to use in the table lookup.
+ * the %rax value we should see. Instead, we just truncate that
+ * value to 32 bits again as we did on entry from user mode.
+ * If it's a new value set by user_regset during entry tracing,
+ * this matches the normal truncation of the user-mode value.
+ * If it's -1 to make us punt the syscall, then (u32)-1 is still
+ * an appropriately invalid value.
*/
.macro LOAD_ARGS32 offset, _r9=0
.if \_r9
@@ -60,6 +65,7 @@
movl \offset+48(%rsp),%edx
movl \offset+56(%rsp),%esi
movl \offset+64(%rsp),%edi
+ movl %eax,%eax /* zero extension */
.endm
.macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
sysenter_do_call:
IA32_ARG_FIXUP
@@ -195,7 +201,7 @@
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
movl %ebx,%edi /* reload 1st syscall arg */
movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
@@ -248,7 +254,7 @@
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
jmp sysenter_do_call
CFI_ENDPROC
@@ -314,7 +320,7 @@
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz cstar_tracesys
- cmpl $IA32_NR_syscalls-1,%eax
+ cmpq $IA32_NR_syscalls-1,%rax
ja ia32_badsys
cstar_do_call:
IA32_ARG_FIXUP 1
@@ -367,7 +373,7 @@
LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
RESTORE_REST
xchgl %ebp,%r9d
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
jmp cstar_do_call
END(ia32_cstar_target)
@@ -425,7 +431,7 @@
orl $TS_COMPAT,TI_status(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
jnz ia32_tracesys
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
ia32_do_call:
IA32_ARG_FIXUP
@@ -444,7 +450,7 @@
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
jmp ia32_do_call
END(ia32_syscall)
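The cmpl-to-cmpq changes and the added zero extension close a hole where the bound check looked only at the low 32 bits of the syscall number while the later table index used the full 64-bit register, so a value such as 0x100000005 written by a tracer could slip past the check. In C terms the fixed pattern is roughly the following (stand-alone sketch with a hypothetical table, not the entry code):

#include <stdint.h>
#include <stdio.h>

#define NR_SYSCALLS 4			/* hypothetical table size */

static long sys_zero(void) { return 0; }

typedef long (*syscall_fn)(void);
static syscall_fn table[NR_SYSCALLS] = { sys_zero, sys_zero, sys_zero, sys_zero };

static long dispatch(uint64_t rax)
{
	rax = (uint32_t)rax;		/* zero-extend, like "movl %eax,%eax" */
	if (rax > NR_SYSCALLS - 1)	/* full-width compare, like cmpq */
		return -38;		/* -ENOSYS */
	return table[rax]();
}

int main(void)
{
	printf("%ld\n", dispatch(2));		   /* valid number */
	printf("%ld\n", dispatch(0x100000002ULL)); /* truncated, then checked */
	printf("%ld\n", dispatch(-1));		   /* (u32)-1 is rejected */
	return 0;
}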
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index d2544f1..cb03037 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -38,4 +38,10 @@
#endif /* !CONFIG_AMD_IOMMU_STATS */
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+ return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+ (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 7014e88..0861618 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -368,6 +368,9 @@
/* capabilities of that IOMMU read from ACPI */
u32 cap;
+ /* flags read from acpi table */
+ u8 acpi_flags;
+
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,15 @@
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
+
+ /*
+ * This array is required to work around a potential BIOS bug.
+ * The BIOS may fail to restore parts of the PCI configuration
+ * space when the system resumes from S3. The result is that the
+ * IOMMU does not execute commands anymore which leads to system
+ * failure.
+ */
+ u32 cache_cfg[4];
};
/*
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 545776e..bafd80d 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -309,7 +309,7 @@
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
- (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+ (addr[nr / BITS_PER_LONG])) != 0;
}
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 306160e..1d9cd27 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -205,7 +205,7 @@
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 781a50b..3f76523 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -168,6 +168,7 @@
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -296,6 +297,7 @@
#endif /* CONFIG_X86_64 */
+#if __GNUC__ >= 4
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These are only valid after alternatives have run, but will statically
@@ -304,7 +306,7 @@
*/
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
asm goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
@@ -345,7 +347,6 @@
#endif
}
-#if __GNUC__ >= 4
#define static_cpu_has(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 004e6e2..1d5c08a 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -68,7 +68,6 @@
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;
-extern u8 hpet_readback_cmp;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 528a11e..824ca07 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -20,7 +20,7 @@
#include <linux/list.h>
/* Available HW breakpoint length encodings */
-#define X86_BREAKPOINT_LEN_X 0x00
+#define X86_BREAKPOINT_LEN_X 0x40
#define X86_BREAKPOINT_LEN_1 0x40
#define X86_BREAKPOINT_LEN_2 0x44
#define X86_BREAKPOINT_LEN_4 0x4c
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index f35eb45..c4191b3 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-void *
+void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 51cfd73..1f99ecf 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -152,9 +152,14 @@
struct operand {
enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
unsigned int bytes;
- unsigned long orig_val, *ptr;
+ union {
+ unsigned long orig_val;
+ u64 orig_val64;
+ };
+ unsigned long *ptr;
union {
unsigned long val;
+ u64 val64;
char valptr[sizeof(unsigned long) + 2];
};
};
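The new orig_val64/val64 union members let the emulator keep a full 64-bit operand (needed for cmpxchg8b) even where unsigned long is only 32 bits wide. A stand-alone sketch of the layout idea (hypothetical struct; anonymous union as in the header, which needs C11 or the GNU extension):

#include <stdint.h>
#include <stdio.h>

struct operand_sketch {
	union {
		unsigned long val;	/* native word, used by most operations */
		uint64_t val64;		/* full width for 8-byte operands */
	};
};

int main(void)
{
	struct operand_sketch op;

	op.val64 = 0x1122334455667788ULL;
	/* On a 32-bit build, op.val holds only the low word; code that
	 * needs all 64 bits must read and write val64 consistently. */
	printf("sizeof=%zu low word=%lx\n", sizeof(op), op.val & 0xffffffffUL);
	return 0;
}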
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0925676..fedf32a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -11,6 +11,8 @@
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_pvclock.o = -pg
+CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fa044e1..679b645 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1953,6 +1953,7 @@
size_t size,
int dir)
{
+ dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
@@ -1960,6 +1961,7 @@
(dma_addr + size > dma_dom->aperture_size))
return;
+ flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
@@ -1974,7 +1976,7 @@
dma_ops_free_addresses(dma_dom, dma_addr, pages);
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
- iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+ iommu_flush_pages(&dma_dom->domain, flush_addr, size);
dma_dom->need_flush = false;
}
}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 3cc63e2..5a170cb 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -632,6 +632,13 @@
iommu->last_device = calc_devid(MMIO_GET_BUS(range),
MMIO_GET_LD(range));
iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+ if (is_rd890_iommu(iommu->dev)) {
+ pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
+ pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
+ pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
+ pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
+ }
}
/*
@@ -649,29 +656,9 @@
struct ivhd_entry *e;
/*
- * First set the recommended feature enable bits from ACPI
- * into the IOMMU control registers
+ * First save the recommended feature enable bits from ACPI
*/
- h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
- iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
- h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
- h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
- h->flags & IVHD_FLAG_ISOC_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
- iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
- /*
- * make IOMMU memory accesses cache coherent
- */
- iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+ iommu->acpi_flags = h->flags;
/*
* Done. Now parse the device entries
@@ -1116,6 +1103,40 @@
}
}
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+ iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+ iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+ iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+ /*
+ * make IOMMU memory accesses cache coherent
+ */
+ iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_quirks(struct amd_iommu *iommu)
+{
+ if (is_rd890_iommu(iommu->dev)) {
+ pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
+ pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
+ pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
+ pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
+ }
+}
+
/*
* This function finally enables all IOMMUs found in the system after
* they have been initialized
@@ -1126,6 +1147,8 @@
for_each_iommu(iommu) {
iommu_disable(iommu);
+ iommu_apply_quirks(iommu);
+ iommu_init_flags(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 7b598b8..f744f54 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -698,9 +698,11 @@
for (j = 0; j < 64; j++) {
if (!test_bit(j, &present))
continue;
- uv_blade_info[blade].pnode = (i * 64 + j);
+ pnode = (i * 64 + j);
+ uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
uv_blade_info[blade].nr_online_cpus = 0;
+ max_pnode = max(pnode, max_pnode);
blade++;
}
}
@@ -738,7 +740,6 @@
uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
uv_node_to_blade[nid] = blade;
uv_cpu_to_blade[cpu] = blade;
- max_pnode = max(pnode, max_pnode);
}
/* Add blade/pnode info for nodes without cpus */
@@ -750,7 +751,6 @@
pnode = (paddr >> m_val) & pnode_mask;
blade = boot_pnode_to_blade(pnode);
uv_node_to_blade[nid] = blade;
- max_pnode = max(pnode, max_pnode);
}
map_gru_high(max_pnode);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 224392d..5e97529 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@
err = -ENOMEM;
goto out;
}
- if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
kfree(b);
err = -ENOMEM;
goto out;
@@ -543,7 +543,7 @@
#ifndef CONFIG_SMP
cpumask_setall(b->cpus);
#else
- cpumask_copy(b->cpus, c->llc_shared_map);
+ cpumask_set_cpu(cpu, b->cpus);
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index c2a8b26..d9368ee 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,10 +202,11 @@
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+ unsigned int cpu)
{
int err;
- struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
if (err)
@@ -251,7 +252,7 @@
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
mutex_lock(&therm_cpu_lock);
- err = thermal_throttle_add_dev(sys_dev);
+ err = thermal_throttle_add_dev(sys_dev, cpu);
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
@@ -287,7 +288,7 @@
#endif
/* connect live CPUs to sysfs */
for_each_online_cpu(cpu) {
- err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+ err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
WARN_ON(err);
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20f..03a5b03 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -102,6 +102,7 @@
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events;
@@ -1010,6 +1011,7 @@
x86_perf_event_set_period(event);
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
+ __set_bit(idx, cpuc->running);
x86_pmu.enable(event);
perf_event_update_userpage(event);
@@ -1141,8 +1143,16 @@
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- if (!test_bit(idx, cpuc->active_mask))
+ if (!test_bit(idx, cpuc->active_mask)) {
+ /*
+ * Though we deactivated the counter, some CPUs
+ * might still deliver spurious interrupts that
+ * are still in flight. Catch them:
+ */
+ if (__test_and_clear_bit(idx, cpuc->running))
+ handled++;
continue;
+ }
event = cpuc->events[idx];
hwc = &event->hw;
@@ -1154,7 +1164,7 @@
/*
* event overflow
*/
- handled = 1;
+ handled++;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
@@ -1200,12 +1210,20 @@
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
+struct pmu_nmi_state {
+ unsigned int marked;
+ int handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
- struct pt_regs *regs;
+ unsigned int this_nmi;
+ int handled;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
@@ -1214,22 +1232,47 @@
case DIE_NMI:
case DIE_NMI_IPI:
break;
-
+ case DIE_NMIUNKNOWN:
+ this_nmi = percpu_read(irq_stat.__nmi_count);
+ if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+ /* let the kernel handle the unknown nmi */
+ return NOTIFY_DONE;
+ /*
+ * This one is a PMU back-to-back nmi. Two events
+ * trigger 'simultaneously' raising two back-to-back
+ * NMIs. If the first NMI handles both, the latter
+ * will be empty and daze the CPU. So, we drop it to
+ * avoid false-positive 'unknown nmi' messages.
+ */
+ return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}
- regs = args->regs;
-
apic_write(APIC_LVTPC, APIC_DM_NMI);
- /*
- * Can't rely on the handled return value to say it was our NMI, two
- * events could trigger 'simultaneously' raising two back-to-back NMIs.
- *
- * If the first NMI handles both, the latter will be empty and daze
- * the CPU.
- */
- x86_pmu.handle_irq(regs);
+
+ handled = x86_pmu.handle_irq(args->regs);
+ if (!handled)
+ return NOTIFY_DONE;
+
+ this_nmi = percpu_read(irq_stat.__nmi_count);
+ if ((handled > 1) ||
+ /* the next nmi could be a back-to-back nmi */
+ ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+ (__get_cpu_var(pmu_nmi).handled > 1))) {
+ /*
+ * We could have two subsequent back-to-back nmis: The
+ * first handles more than one counter, the 2nd
+ * handles only one counter and the 3rd handles no
+ * counter.
+ *
+ * This is the 2nd nmi because the previous was
+ * handling more than one counter. We will mark the
+ * next (3rd) and then drop it if unhandled.
+ */
+ __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
+ __get_cpu_var(pmu_nmi).handled = handled;
+ }
return NOTIFY_STOP;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d0..ee05c90 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int bit, loops;
- u64 ack, status;
+ u64 status;
+ int handled = 0;
perf_sample_data_init(&data, 0);
@@ -728,6 +729,7 @@
loops = 0;
again:
+ intel_pmu_ack_status(status);
if (++loops > 100) {
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
@@ -736,19 +738,22 @@
}
inc_irq_stat(apic_perf_irqs);
- ack = status;
intel_pmu_lbr_read();
/*
* PEBS overflow sets bit 62 in the global status register
*/
- if (__test_and_clear_bit(62, (unsigned long *)&status))
+ if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+ handled++;
x86_pmu.drain_pebs(regs);
+ }
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
+ handled++;
+
if (!test_bit(bit, cpuc->active_mask))
continue;
@@ -761,8 +766,6 @@
x86_pmu_stop(event);
}
- intel_pmu_ack_status(ack);
-
/*
* Repeat if there is more work to be done:
*/
@@ -772,7 +775,7 @@
done:
intel_pmu_enable_all(0);
- return 1;
+ return handled;
}
static struct event_constraint *
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7e578e9..b560db3 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@
inc_irq_stat(apic_perf_irqs);
}
- return handled > 0;
+ return handled;
}
/*
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 34b4dad..d490795 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,6 +31,7 @@
const struct cpuid_bit *cb;
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+ { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
{ X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index e5cc7e8..ebdb85c 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -18,7 +18,6 @@
#include <asm/apic.h>
#include <asm/iommu.h>
#include <asm/gart.h>
-#include <asm/hpet.h>
static void __init fix_hypertransport_config(int num, int slot, int func)
{
@@ -192,21 +191,6 @@
}
#endif
-/*
- * Force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
- *
- * We do this on all SMBUS incarnations for now until we have more
- * information about the affected chipsets.
- */
-static void __init ati_hpet_bugs(int num, int slot, int func)
-{
-#ifdef CONFIG_HPET_TIMER
- hpet_readback_cmp = 1;
-#endif
-}
-
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -236,8 +220,6 @@
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
- { PCI_VENDOR_ID_ATI, PCI_ANY_ID,
- PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
{}
};
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 351f9c0..410fdb3 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -35,7 +35,6 @@
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
u8 hpet_msi_disable;
-u8 hpet_readback_cmp;
#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
@@ -395,23 +394,27 @@
* at that point and we would wait for the next hpet interrupt
* forever. We found out that reading the CMP register back
* forces the transfer so we can rely on the comparison with
- * the counter register below.
+ * the counter register below. If the read back from the
+ * compare register does not match the value we programmed
+ * then we might have a real hardware problem. We can not do
+ * much about it here, but at least alert the user/admin with
+ * a prominent warning.
*
- * That works fine on those ATI chipsets, but on newer Intel
- * chipsets (ICH9...) this triggers due to an erratum: Reading
- * the comparator immediately following a write is returning
- * the old value.
+ * An erratum on some chipsets (ICH9, ...) causes the comparator
+ * read immediately following a write to return the old value.
+ * The workaround is to read the register a second time when the
+ * first read returns the old value.
*
- * We restrict the read back to the affected ATI chipsets (set
- * by quirks) and also run it with hpet=verbose for debugging
- * purposes.
+ * In fact the write to the comparator register is delayed up
+ * to two HPET cycles so the workaround we tried to restrict
+ * the readback to those known to be borked ATI chipsets
+ * failed miserably. So we give up on optimizations forever
+ * and penalize all HPET incarnations unconditionally.
*/
- if (hpet_readback_cmp || hpet_verbose) {
- u32 cmp = hpet_readl(HPET_Tn_CMP(timer));
-
- if (cmp != cnt)
+ if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+ if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
printk_once(KERN_WARNING
- "hpet: compare register read back failed.\n");
+ "hpet: compare register read back failed.\n");
}
return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index a474ec3..ff15c9d 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -206,11 +206,27 @@
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
+ /* Type */
+ switch (x86_type) {
+ case X86_BREAKPOINT_EXECUTE:
+ if (x86_len != X86_BREAKPOINT_LEN_X)
+ return -EINVAL;
+
+ *gen_type = HW_BREAKPOINT_X;
+ *gen_len = sizeof(long);
+ return 0;
+ case X86_BREAKPOINT_WRITE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+ case X86_BREAKPOINT_RW:
+ *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* Len */
switch (x86_len) {
- case X86_BREAKPOINT_LEN_X:
- *gen_len = sizeof(long);
- break;
case X86_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
@@ -229,21 +245,6 @@
return -EINVAL;
}
- /* Type */
- switch (x86_type) {
- case X86_BREAKPOINT_EXECUTE:
- *gen_type = HW_BREAKPOINT_X;
- break;
- case X86_BREAKPOINT_WRITE:
- *gen_type = HW_BREAKPOINT_W;
- break;
- case X86_BREAKPOINT_RW:
- *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
- break;
- default:
- return -EINVAL;
- }
-
return 0;
}
@@ -316,9 +317,6 @@
ret = -EINVAL;
switch (info->len) {
- case X86_BREAKPOINT_LEN_X:
- align = sizeof(long) -1;
- break;
case X86_BREAKPOINT_LEN_1:
align = 0;
break;
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a874495..e2a5952 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -45,8 +45,7 @@
/* Copy kernel address range */
clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min_t(unsigned long, KERNEL_PGD_PTRS,
- KERNEL_PGD_BOUNDARY));
+ KERNEL_PGD_PTRS);
/* Initialize low mappings */
clone_pgd_range(trampoline_pg_dir,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d632934..26a863a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -655,7 +655,7 @@
local_irq_save(flags);
- get_cpu_var(cyc2ns_offset) = 0;
+ __get_cpu_var(cyc2ns_offset) = 0;
offset = cyc2ns_suspend - sched_clock();
for_each_possible_cpu(cpu)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b38bd8b9..66ca98a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1870,17 +1870,16 @@
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- u64 old = c->dst.orig_val;
+ u64 old = c->dst.orig_val64;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
-
c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
} else {
- c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
- (u32) c->regs[VCPU_REGS_RBX];
+ c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+ (u32) c->regs[VCPU_REGS_RBX];
ctxt->eflags |= EFLG_ZF;
}
@@ -2616,7 +2615,7 @@
c->src.valptr, c->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
- c->src.orig_val = c->src.val;
+ c->src.orig_val64 = c->src.val64;
}
if (c->src2.type == OP_MEM) {
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 8d10c06..4b7b73c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -64,6 +64,9 @@
if (!found)
found = s->kvm->bsp_vcpu;
+ if (!found)
+ return;
+
kvm_vcpu_kick(found);
}
}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index ffed068..63c3145 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -43,7 +43,6 @@
u8 irr; /* interrupt request register */
u8 imr; /* interrupt mask register */
u8 isr; /* interrupt service register */
- u8 isr_ack; /* interrupt ack detection */
u8 priority_add; /* highest irq priority */
u8 irq_base;
u8 read_reg_select;
@@ -56,6 +55,7 @@
u8 init4; /* true if 4 byte init */
u8 elcr; /* PIIX edge/trigger selection */
u8 elcr_mask;
+ u8 isr_ack; /* interrupt ack detection */
struct kvm_pic *pics_state;
};
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 9257510..9d5f558 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -324,9 +324,8 @@
}
/*
- * For a single GDT entry which changes, we do the lazy thing: alter our GDT,
- * then tell the Host to reload the entire thing. This operation is so rare
- * that this naive implementation is reasonable.
+ * For a single GDT entry which changes, we simply change our copy and
+ * then tell the host about it.
*/
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
const void *desc, int type)
@@ -338,9 +337,13 @@
}
/*
- * OK, I lied. There are three "thread local storage" GDT entries which change
+ * There are three "thread local storage" GDT entries which change
* on every context switch (these three entries are how glibc implements
- * __thread variables). So we have a hypercall specifically for this case.
+ * __thread variables). As an optimization, we have a hypercall
+ * specifically for this case.
+ *
+ * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
+ * which took a range of entries?
*/
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 84e236c..72fc70c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@
/*
* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
-void *
+void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
/*
@@ -86,12 +86,12 @@
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS;
- return kmap_atomic_prot_pfn(pfn, type, prot);
+ return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f6b48f6..009b819 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@
int error;
error = sysdev_class_register(&oprofile_sysclass);
- if (!error)
- error = sysdev_register(&device_oprofile);
+ if (error)
+ return error;
+
+ error = sysdev_register(&device_oprofile);
+ if (error)
+ sysdev_class_unregister(&oprofile_sysclass);
+
return error;
}
@@ -580,8 +585,10 @@
}
#else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
@@ -664,7 +671,9 @@
case 14:
*cpu_type = "i386/core";
break;
- case 15: case 23:
+ case 0x0f:
+ case 0x16:
+ case 0x17:
*cpu_type = "i386/core_2";
break;
case 0x1a:
@@ -695,6 +704,8 @@
char *cpu_type = NULL;
int ret = 0;
+ using_nmi = 0;
+
if (!cpu_has_apic)
return -ENODEV;
@@ -774,7 +785,10 @@
mux_init(ops);
- init_sysfs();
+ ret = init_sysfs();
+ if (ret)
+ return ret;
+
using_nmi = 1;
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
return 0;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a680964..2fef1ef 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -966,7 +966,7 @@
/* Currently we do not support hierarchy deeper than two level (0,1) */
if (parent != cgroup->top_cgroup)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EPERM);
blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
if (!blkcg)
diff --git a/block/blk-core.c b/block/blk-core.c
index ee1a1e7..32a1c12 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1198,9 +1198,9 @@
int el_ret;
unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
- const bool sync = (bio->bi_rw & REQ_SYNC);
- const bool unplug = (bio->bi_rw & REQ_UNPLUG);
- const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const bool sync = !!(bio->bi_rw & REQ_SYNC);
+ const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
+ const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
if ((bio->bi_rw & REQ_HARDBARRIER) &&
diff --git a/block/blk-map.c b/block/blk-map.c
index c65d759..ade0a08 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@
return PTR_ERR(bio);
if (rq_data_dir(rq) == WRITE)
- bio->bi_rw |= (1 << REQ_WRITE);
+ bio->bi_rw |= REQ_WRITE;
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3b0cd42..eafc94f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -362,6 +362,18 @@
return 0;
/*
+ * Don't merge file system requests and discard requests
+ */
+ if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+ return 0;
+
+ /*
+ * Don't merge discard requests and secure discard requests
+ */
+ if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+ return 0;
+
+ /*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 001ab18..0749b89 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -511,6 +511,7 @@
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
+ kobject_put(&dev->kobj);
return ret;
}
diff --git a/block/blk.h b/block/blk.h
index 6e7dc87..d6b911a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -142,14 +142,18 @@
static inline int blk_cpu_to_group(int cpu)
{
+ int group = NR_CPUS;
#ifdef CONFIG_SCHED_MC
const struct cpumask *mask = cpu_coregroup_mask(cpu);
- return cpumask_first(mask);
+ group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
- return cpumask_first(topology_thread_cpumask(cpu));
+ group = cpumask_first(topology_thread_cpumask(cpu));
#else
return cpu;
#endif
+ if (likely(group < NR_CPUS))
+ return group;
+ return cpu;
}
/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index eb4086f..9eba291 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
@@ -147,6 +148,8 @@
struct cfq_queue *new_cfqq;
struct cfq_group *cfqg;
struct cfq_group *orig_cfqg;
+ /* Number of sectors dispatched from queue in single dispatch round */
+ unsigned long nr_sectors;
};
/*
@@ -198,6 +201,8 @@
struct hlist_node cfqd_node;
atomic_t ref;
#endif
+ /* number of requests that are on the dispatch list or inside driver */
+ int dispatched;
};
/*
@@ -271,6 +276,7 @@
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
+ unsigned int cfq_group_idle;
unsigned int cfq_latency;
unsigned int cfq_group_isolation;
@@ -378,6 +384,21 @@
&cfqg->service_trees[i][j]: NULL) \
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+ /*
+ * If we are not idling on queues and it is a NCQ drive, parallel
+ * execution of requests is on and measuring time is not possible
+ * in most of the cases until and unless we drive shallower queue
+ * depths and that becomes a performance bottleneck. In such cases
+ * switch to start providing fairness in terms of number of IOs.
+ */
+ if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+ return true;
+ else
+ return false;
+}
+
static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@
slice_used = cfqq->allocated_slice;
}
- cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
return slice_used;
}
@@ -914,19 +934,21 @@
struct cfq_queue *cfqq)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
- unsigned int used_sl, charge_sl;
+ unsigned int used_sl, charge;
int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
- cfqg->service_tree_idle.count;
BUG_ON(nr_sync < 0);
- used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+ used_sl = charge = cfq_cfqq_slice_usage(cfqq);
- if (!cfq_cfqq_sync(cfqq) && !nr_sync)
- charge_sl = cfqq->allocated_slice;
+ if (iops_mode(cfqd))
+ charge = cfqq->slice_dispatch;
+ else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+ charge = cfqq->allocated_slice;
/* Can't update vdisktime while group is on service tree */
cfq_rb_erase(&cfqg->rb_node, st);
- cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+ cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
__cfq_group_service_tree_add(st, cfqg);
/* This group is being expired. Save the context */
@@ -940,6 +962,9 @@
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+ " sect=%u", used_sl, cfqq->slice_dispatch, charge,
+ iops_mode(cfqd), cfqq->nr_sectors);
cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}
@@ -994,10 +1019,20 @@
*/
atomic_set(&cfqg->ref, 1);
- /* Add group onto cgroup list */
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+ /*
+ * Add group onto cgroup list. It might happen that bdi->dev is
+ * not initialized yet. Initialize this new group without major
+ * and minor info and this info will be filled in once a new thread
+ * comes for IO. See code above.
+ */
+ if (bdi->dev) {
+ sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+ cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
MKDEV(major, minor));
+ } else
+ cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+ 0);
+
cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
/* Add group on cfqd list */
@@ -1587,6 +1622,7 @@
cfqq->allocated_slice = 0;
cfqq->slice_end = 0;
cfqq->slice_dispatch = 0;
+ cfqq->nr_sectors = 0;
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@
BUG_ON(!service_tree);
BUG_ON(!service_tree->count);
+ if (!cfqd->cfq_slice_idle)
+ return false;
+
/* We never do for idle class queues. */
if (prio == IDLE_WORKLOAD)
return false;
@@ -1863,7 +1902,7 @@
{
struct cfq_queue *cfqq = cfqd->active_queue;
struct cfq_io_context *cic;
- unsigned long sl;
+ unsigned long sl, group_idle = 0;
/*
* SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@
/*
* idle is disabled, either manually or by past process history
*/
- if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
- return;
+ if (!cfq_should_idle(cfqd, cfqq)) {
+ /* no queue idling. Check for group idling */
+ if (cfqd->cfq_group_idle)
+ group_idle = cfqd->cfq_group_idle;
+ else
+ return;
+ }
/*
* still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@
return;
}
+ /* There are other queues in the group, don't do group idle */
+ if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+ return;
+
cfq_mark_cfqq_wait_request(cfqq);
- sl = cfqd->cfq_slice_idle;
+ if (group_idle)
+ sl = cfqd->cfq_group_idle;
+ else
+ sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
- cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+ cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+ group_idle ? 1 : 0);
}
/*
@@ -1929,9 +1981,11 @@
cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
cfq_remove_request(rq);
cfqq->dispatched++;
+ (RQ_CFQG(rq))->dispatched++;
elv_dispatch_sort(q, rq);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+ cfqq->nr_sectors += blk_rq_sectors(rq);
cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
rq_data_dir(rq), rq_is_sync(rq));
}
@@ -2198,7 +2252,7 @@
cfqq = NULL;
goto keep_queue;
} else
- goto expire;
+ goto check_group_idle;
}
/*
@@ -2226,8 +2280,23 @@
* flight or is idling for a new request, allow either of these
* conditions to happen (or time out) before selecting a new queue.
*/
- if (timer_pending(&cfqd->idle_slice_timer) ||
- (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+ if (timer_pending(&cfqd->idle_slice_timer)) {
+ cfqq = NULL;
+ goto keep_queue;
+ }
+
+ if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+ cfqq = NULL;
+ goto keep_queue;
+ }
+
+ /*
+ * If group idle is enabled and there are requests dispatched from
+ * this group, wait for requests to complete.
+ */
+check_group_idle:
+ if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+ && cfqq->cfqg->dispatched) {
cfqq = NULL;
goto keep_queue;
}
@@ -3375,6 +3444,7 @@
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
cfqq->dispatched--;
+ (RQ_CFQG(rq))->dispatched--;
cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
rq_start_time_ns(rq), rq_io_start_time_ns(rq),
rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@
* the queue.
*/
if (cfq_should_wait_busy(cfqd, cfqq)) {
- cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+ unsigned long extend_sl = cfqd->cfq_slice_idle;
+ if (!cfqd->cfq_slice_idle)
+ extend_sl = cfqd->cfq_group_idle;
+ cfqq->slice_end = jiffies + extend_sl;
cfq_mark_cfqq_wait_busy(cfqq);
cfq_log_cfqq(cfqd, cfqq, "will busy wait");
}
@@ -3850,6 +3923,7 @@
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->cfq_group_idle = cfq_group_idle;
cfqd->cfq_latency = 1;
cfqd->cfq_group_isolation = 0;
cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@
CFQ_ATTR(slice_async),
CFQ_ATTR(slice_async_rq),
CFQ_ATTR(slice_idle),
+ CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
CFQ_ATTR(group_isolation),
__ATTR_NULL
@@ -4028,6 +4105,12 @@
if (!cfq_slice_idle)
cfq_slice_idle = 1;
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ if (!cfq_group_idle)
+ cfq_group_idle = 1;
+#else
+ cfq_group_idle = 0;
+#endif
if (cfq_slab_setup())
return -ENOMEM;
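The arm_slice_timer and select-queue hunks above reduce to one small decision: idle per-queue with slice_idle, or per-group with group_idle, and only treat group idle as applicable when the queue is alone in its group. A minimal standalone C sketch of that decision follows; the struct and function names are simplified stand-ins, not the real cfq structures.

/* Sketch of the idle-window selection added above; "struct group" is a
 * stand-in for cfq_group, not the kernel definition. */
#include <stdbool.h>
#include <stdio.h>

struct group { int nr_queues; int dispatched; };

static unsigned long pick_idle_window(bool group_idle, const struct group *g,
                                      unsigned long slice_idle,
                                      unsigned long group_idle_win)
{
    /* With group idle in effect and other queues in the group, don't idle. */
    if (group_idle && g->nr_queues > 1)
        return 0;
    return group_idle ? group_idle_win : slice_idle;
}

int main(void)
{
    struct group g = { .nr_queues = 1, .dispatched = 2 };

    printf("idle window: %lu jiffies\n", pick_idle_window(true, &g, 8, 4));
    return 0;
}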
diff --git a/block/elevator.c b/block/elevator.c
index ec585c9..205b09a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1009,18 +1009,19 @@
{
struct elevator_queue *old_elevator, *e;
void *data;
+ int err;
/*
* Allocate new elevator
*/
e = elevator_alloc(q, new_e);
if (!e)
- return 0;
+ return -ENOMEM;
data = elevator_init_queue(q, e);
if (!data) {
kobject_put(&e->kobj);
- return 0;
+ return -ENOMEM;
}
/*
@@ -1043,7 +1044,8 @@
__elv_unregister_queue(old_elevator);
- if (elv_register_queue(q))
+ err = elv_register_queue(q);
+ if (err)
goto fail_register;
/*
@@ -1056,7 +1058,7 @@
blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
- return 1;
+ return 0;
fail_register:
/*
@@ -1071,17 +1073,19 @@
queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
spin_unlock_irq(q->queue_lock);
- return 0;
+ return err;
}
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
- size_t count)
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
{
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
if (!q->elevator)
- return count;
+ return -ENXIO;
strlcpy(elevator_name, name, sizeof(elevator_name));
e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1096,27 @@
if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
elevator_put(e);
- return count;
+ return 0;
}
- if (!elevator_switch(q, e))
- printk(KERN_ERR "elevator: switch to %s failed\n",
- elevator_name);
- return count;
+ return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ size_t count)
+{
+ int ret;
+
+ if (!q->elevator)
+ return count;
+
+ ret = elevator_change(q, name);
+ if (!ret)
+ return count;
+
+ printk(KERN_ERR "elevator: switch to %s failed\n", name);
+ return ret;
}
ssize_t elv_iosched_show(struct request_queue *q, char *name)
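The elevator.c hunks change the return convention: elevator_switch() used to return 1 on success and 0 on failure, and now returns 0 or a negative errno, which is what lets elv_iosched_store() report the real error and lets the new exported elevator_change() helper be used directly by drivers (as the mg_disk conversion later in this diff does). A standalone sketch of that caller convention, with stand-in functions rather than the kernel API:

/* Stand-in for the 0/-errno convention adopted above. */
#include <errno.h>
#include <stdio.h>

static int switch_ok;                   /* toggle to simulate failure */

static int do_switch(const char *name)
{
    printf("switching to %s\n", name);
    return switch_ok ? 0 : -ENOMEM;     /* 0 on success, -errno on failure */
}

int main(void)
{
    int err = do_switch("noop");

    if (err)
        fprintf(stderr, "elevator: switch to %s failed (%d)\n", "noop", err);
    return 0;
}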
diff --git a/drivers/Makefile b/drivers/Makefile
index ae47344..a2aea53 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,7 @@
obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
-obj-$(CONFIG_FIREWIRE) += firewire/
+obj-y += firewire/
obj-y += ieee1394/
obj-$(CONFIG_UIO) += uio/
obj-y += cdrom/
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index d31590e..2737b97 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -298,7 +298,7 @@
amba_put_disable_pclk(dev);
- if (cid == 0xb105f00d)
+ if (cid == AMBA_CID)
dev->periphid = pid;
if (!dev->periphid)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 013727b..99d0e5a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -90,6 +90,10 @@
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
+static struct scsi_host_template ahci_sht = {
+ AHCI_SHT("ahci"),
+};
+
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_vt8251_hardreset,
@@ -253,6 +257,9 @@
{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 474427b..e5fdeeb 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -298,7 +298,17 @@
extern int ahci_ignore_sss;
-extern struct scsi_host_template ahci_sht;
+extern struct device_attribute *ahci_shost_attrs[];
+extern struct device_attribute *ahci_sdev_attrs[];
+
+#define AHCI_SHT(drv_name) \
+ ATA_NCQ_SHT(drv_name), \
+ .can_queue = AHCI_MAX_CMDS - 1, \
+ .sg_tablesize = AHCI_MAX_SG, \
+ .dma_boundary = AHCI_DMA_BOUNDARY, \
+ .shost_attrs = ahci_shost_attrs, \
+ .sdev_attrs = ahci_sdev_attrs
+
extern struct ata_port_operations ahci_ops;
void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4e97f33..84b6432 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,10 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT("ahci_platform"),
+};
+
static int __init ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@
ahci_print_info(host, "platform");
rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
- &ahci_sht);
+ &ahci_platform_sht);
if (rc)
goto err0;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3971bc0..d712675 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -302,6 +302,10 @@
{ 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PBG) */
+ { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (PBG) */
+ { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
{ } /* terminate list */
};
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 666850d..8eea309 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -121,7 +121,7 @@
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
ahci_read_em_buffer, ahci_store_em_buffer);
-static struct device_attribute *ahci_shost_attrs[] = {
+struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
&dev_attr_em_message_type,
&dev_attr_em_message,
@@ -132,22 +132,14 @@
&dev_attr_em_buffer,
NULL
};
+EXPORT_SYMBOL_GPL(ahci_shost_attrs);
-static struct device_attribute *ahci_sdev_attrs[] = {
+struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
NULL
};
-
-struct scsi_host_template ahci_sht = {
- ATA_NCQ_SHT("ahci"),
- .can_queue = AHCI_MAX_CMDS - 1,
- .sg_tablesize = AHCI_MAX_SG,
- .dma_boundary = AHCI_DMA_BOUNDARY,
- .shost_attrs = ahci_shost_attrs,
- .sdev_attrs = ahci_sdev_attrs,
-};
-EXPORT_SYMBOL_GPL(ahci_sht);
+EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,
@@ -1326,7 +1318,7 @@
/* issue the first D2H Register FIS */
msecs = 0;
now = jiffies;
- if (time_after(now, deadline))
+ if (time_after(deadline, now))
msecs = jiffies_to_msecs(deadline - now);
tf.ctl |= ATA_SRST;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c035b3d..932eaee 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5418,6 +5418,7 @@
*/
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
+ unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
/*
@@ -5426,7 +5427,18 @@
*/
ata_lpm_enable(host);
- rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+ /*
+ * On some hardware, device fails to respond after spun down
+ * for suspend. As the device won't be used before being
+ * resumed, we don't need to touch the device. Ask EH to skip
+ * the usual stuff and proceed directly to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+ if (mesg.event == PM_EVENT_SUSPEND)
+ ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+ rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
if (rc == 0)
host->dev->power.power_state = mesg;
return rc;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c9ae299..e48302e 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3235,6 +3235,10 @@
if (link->flags & ATA_LFLAG_DISABLED)
return 1;
+ /* skip if explicitly requested */
+ if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+ return 1;
+
/* thaw frozen port and recover failed devices */
if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
return 0;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 3b82d8e..e30c537 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -418,6 +418,7 @@
if (ioaddr->ctl_addr)
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@
iowrite8(tf->device, ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
+
+ ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
@@ -1042,7 +1045,8 @@
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq)
{
- struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ata_link *link = qc->dev->link;
+ struct ata_eh_info *ehi = &link->eh_info;
unsigned long flags = 0;
int poll_next;
@@ -1298,8 +1302,14 @@
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
+ struct ata_port *ap = link->ap;
+
+ WARN_ON((ap->sff_pio_task_link != NULL) &&
+ (ap->sff_pio_task_link != link));
+ ap->sff_pio_task_link = link;
+
/* may fail if ata_sff_flush_pio_task() in progress */
queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@
{
struct ata_port *ap =
container_of(work, struct ata_port, sff_pio_task.work);
+ struct ata_link *link = ap->sff_pio_task_link;
struct ata_queued_cmd *qc;
u8 status;
int poll_next;
+ BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (!qc)
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ if (!qc) {
+ ap->sff_pio_task_link = NULL;
return;
+ }
fsm_start:
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@
msleep(2);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
- ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
+ ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
return;
}
}
+ /*
+ * hsm_move() may trigger another command to be processed.
+ * clean the link beforehand.
+ */
+ ap->sff_pio_task_link = NULL;
/* move the HSM */
poll_next = ata_sff_hsm_move(ap, qc, status, 1);
@@ -1376,6 +1395,7 @@
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_link *link = qc->dev->link;
/* Use polling pio if the LLD doesn't handle
* interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
@@ -1409,7 +1429,7 @@
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
ap->hsm_task_state = HSM_ST_FIRST;
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
/* always send first data block using the
* ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@
ap->hsm_task_state = HSM_ST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
/* if polling, ata_sff_pio_task() handles the
* rest. otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@
/* send cdb by polling if no cdb interrupt */
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
(qc->tf.flags & ATA_TFLAG_POLLING))
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
default:
@@ -2734,6 +2754,7 @@
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_link *link = qc->dev->link;
/* defer PIO handling to sff_qc_issue */
if (!ata_is_dma(qc->tf.protocol))
@@ -2762,7 +2783,7 @@
/* send cdb by polling if no cdb interrupt */
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
default:
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ba43f0f..2215632 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -74,7 +74,8 @@
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
/* Odd numbered device ids are the units with enable bits (the -R cards) */
- if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+ if ((pdev->device & 1) &&
+ !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
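The pata_artop fix above replaces pdev->device % 1 with pdev->device & 1: any integer modulo 1 is 0, so the old test could never match an odd device ID. A short standalone check makes the difference obvious (device IDs below are arbitrary examples):

/* Standalone demonstration of the % 1 vs & 1 fix above. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned int dev;

    for (dev = 0x0005; dev <= 0x0008; dev++)
        printf("0x%04x: %%1 = %u, &1 = %u\n", dev, dev % 1, dev & 1);

    assert((0x0007 % 1) == 0);  /* old test: never true for any device id */
    assert((0x0007 & 1) == 1);  /* new test: true exactly for odd ids */
    return 0;
}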
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 5e65988..ac8d7d9 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -417,6 +417,8 @@
tf->lbam,
tf->lbah);
}
+
+ ata_wait_idle(ap);
}
static int via_port_start(struct ata_port *ap)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 8198259..a9fd970 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2284,7 +2284,7 @@
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
return 0;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5419a49..276d5a7 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@
{
dev->power.status = DPM_ON;
init_completion(&dev->power.completion);
+ complete_all(&dev->power.completion);
dev->power.wakeup_count = 0;
pm_runtime_init(dev);
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 31064df..5e4fadc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -297,6 +297,8 @@
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
h->Qdepth++;
+ if (h->Qdepth > h->maxQsinceinit)
+ h->maxQsinceinit = h->Qdepth;
start_io(h);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -4519,6 +4521,12 @@
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+ /* The doorbell reset seems to cause lockups on some Smart
+ * Arrays (e.g. P410, P410i, maybe others). Until this is
+ * fixed or at least isolated, avoid the doorbell reset.
+ */
+ use_doorbell = 0;
+
rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@
h->scatter_list = kmalloc(h->max_commands *
sizeof(struct scatterlist *),
GFP_KERNEL);
+ if (!h->scatter_list)
+ goto clean4;
+
for (k = 0; k < h->nr_cmds; k++) {
h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
h->maxsgentries,
@@ -4781,7 +4792,7 @@
clean4:
kfree(h->cmd_pool_bits);
/* Free up sg elements */
- for (k = 0; k < h->nr_cmds; k++)
+ for (k-- ; k >= 0; k--)
kfree(h->scatter_list[k]);
kfree(h->scatter_list);
cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
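The clean4 change above stops freeing h->nr_cmds entries unconditionally and instead unwinds only the entries that were actually allocated; the k-- before the loop skips the k-th slot, which is the allocation that just failed. The same allocate-or-unwind pattern in plain C, with generic names standing in for the cciss structures:

/* Generic allocate-N-or-unwind pattern matching the clean4 fix above. */
#include <stdio.h>
#include <stdlib.h>

static int alloc_all(void **slots, int n, size_t sz)
{
    int k;

    for (k = 0; k < n; k++) {
        slots[k] = malloc(sz);
        if (!slots[k])
            goto unwind;
    }
    return 0;

unwind:
    /* slots[k] failed, so free only slots[0 .. k-1] */
    for (k--; k >= 0; k--)
        free(slots[k]);
    return -1;
}

int main(void)
{
    void *slots[4];
    int k;

    if (!alloc_all(slots, 4, 64)) {
        for (k = 3; k >= 0; k--)
            free(slots[k]);
        puts("all allocated");
    }
    return 0;
}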
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f3c636d..91797bb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -477,7 +477,7 @@
pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
- bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
+ bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
struct file *file = lo->lo_backing_file;
if (barrier) {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index b82c5ce..76fa3de 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -974,8 +974,7 @@
host->breq->queuedata = host;
/* mflash is random device, thanx for the noop */
- elevator_exit(host->breq->elevator);
- err = elevator_init(host->breq, "noop");
+ err = elevator_change(host->breq, "noop");
if (err) {
printk(KERN_ERR "%s:%d (elevator_init) fail\n",
__func__, __LINE__);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b1cbeb5..37a2bb5 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2369,7 +2369,7 @@
pkt_shrink_pktlist(pd);
}
-static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index eab58db..cd18493 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -806,6 +806,8 @@
"G45/G43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
"B43", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
+ "B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index ee189c7..d09b1ab 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -186,6 +186,8 @@
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
+#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
+#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 3822b4f..7bd7c45 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -305,6 +305,9 @@
#ifdef CONFIG_PCI
static int pci_registered;
#endif
+#ifdef CONFIG_ACPI
+static int pnp_registered;
+#endif
#ifdef CONFIG_PPC_OF
static int of_registered;
#endif
@@ -2126,7 +2129,7 @@
{
struct acpi_device *acpi_dev;
struct smi_info *info;
- struct resource *res;
+ struct resource *res, *res_second;
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
@@ -2182,13 +2185,13 @@
info->io.addr_data = res->start;
info->io.regspacing = DEFAULT_REGSPACING;
- res = pnp_get_resource(dev,
+ res_second = pnp_get_resource(dev,
(info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
IORESOURCE_IO : IORESOURCE_MEM,
1);
- if (res) {
- if (res->start > info->io.addr_data)
- info->io.regspacing = res->start - info->io.addr_data;
+ if (res_second) {
+ if (res_second->start > info->io.addr_data)
+ info->io.regspacing = res_second->start - info->io.addr_data;
}
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
@@ -3359,6 +3362,7 @@
#ifdef CONFIG_ACPI
pnp_register_driver(&ipmi_pnp_driver);
+ pnp_registered = 1;
#endif
#ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@
pci_unregister_driver(&ipmi_pci_driver);
#endif
#ifdef CONFIG_ACPI
- pnp_unregister_driver(&ipmi_pnp_driver);
+ if (pnp_registered)
+ pnp_unregister_driver(&ipmi_pnp_driver);
#endif
#ifdef CONFIG_PPC_OF
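The ipmi_si change above only calls pnp_unregister_driver() on module exit if the matching pnp_register_driver() actually ran, tracked through a module-local flag. A stripped-down version of that bookkeeping, with stand-in functions rather than the PNP driver API:

/* Minimal register/unregister bookkeeping in the spirit of the ipmi_si fix. */
#include <stdbool.h>
#include <stdio.h>

static bool pnp_registered;

static void fake_register(void)   { puts("registered"); }
static void fake_unregister(void) { puts("unregistered"); }

static void module_init_path(bool want_pnp)
{
    if (!want_pnp)
        return;
    fake_register();
    pnp_registered = true;   /* remember that registration happened */
}

static void module_exit_path(void)
{
    if (pnp_registered)      /* never unregister what was never registered */
        fake_unregister();
}

int main(void)
{
    module_init_path(false);
    module_exit_path();      /* prints nothing: registration never happened */
    return 0;
}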
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a398ecd..1f528fa 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -788,10 +788,11 @@
/*
* capabilities for /dev/zero
* - permits private mappings, "copies" are taken of the source of zeros
+ * - no writeback happens
*/
static struct backing_dev_info zero_bdi = {
.name = "char/mem",
- .capabilities = BDI_CAP_MAP_COPY,
+ .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static const struct file_operations full_fops = {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 942a982..c810481 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -596,6 +596,10 @@
ssize_t ret;
bool nonblock;
+ /* Userspace could be out to fool us */
+ if (!count)
+ return 0;
+
port = filp->private_data;
nonblock = filp->f_flags & O_NONBLOCK;
@@ -642,7 +646,7 @@
poll_wait(filp, &port->waitqueue, wait);
ret = 0;
- if (port->inbuf)
+ if (!will_read_block(port))
ret |= POLLIN | POLLRDNORM;
if (!will_write_block(port))
ret |= POLLOUT;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 2bbeaae..38df8c1 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -533,11 +533,14 @@
case KIOCSOUND:
if (!perm)
goto eperm;
- /* FIXME: This is an old broken API but we need to keep it
- supported and somehow separate the historic advertised
- tick rate from any real one */
+ /*
+ * The use of PIT_TICK_RATE is historic, it used to be
+ * the platform-dependent CLOCK_TICK_RATE between 2.6.12
+ * and 2.6.36, which was a minor but unfortunate ABI
+ * change.
+ */
if (arg)
- arg = CLOCK_TICK_RATE / arg;
+ arg = PIT_TICK_RATE / arg;
kd_mksound(arg, 0);
break;
@@ -553,11 +556,8 @@
*/
ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
count = ticks ? (arg & 0xffff) : 0;
- /* FIXME: This is an old broken API but we need to keep it
- supported and somehow separate the historic advertised
- tick rate from any real one */
if (count)
- count = CLOCK_TICK_RATE / count;
+ count = PIT_TICK_RATE / count;
kd_mksound(count, ticks);
break;
}
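The KIOCSOUND/KDMKTONE conversion above divides the PIT base clock by the requested frequency to get the counter value for the beep; using CLOCK_TICK_RATE had tied the user-visible pitch to a platform constant. Assuming the conventional 1193182 Hz PIT base, the arithmetic is just:

/* Counter value written for a requested beep frequency, assuming the usual
 * 1193182 Hz PIT base clock; frequencies below are arbitrary examples. */
#include <stdio.h>

#define PIT_TICK_RATE 1193182ul

int main(void)
{
    unsigned long freqs[] = { 440, 750, 1000 };
    unsigned int i;

    for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
        printf("%4lu Hz -> counter %lu\n", freqs[i], PIT_TICK_RATE / freqs[i]);
    return 0;
}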
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 8661c84..b98c676 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -39,6 +39,10 @@
static LIST_HEAD(dca_domains);
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@
kfree(domain);
}
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+ struct dca_provider *dca, *_dca;
+ struct list_head unregistered_providers;
+ struct dca_domain *domain;
+ unsigned long flags;
+
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_REMOVE, NULL);
+
+ INIT_LIST_HEAD(&unregistered_providers);
+
+ spin_lock_irqsave(&dca_lock, flags);
+
+ if (list_empty(&dca_domains)) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
+
+ /* at this point only one domain in the list is expected */
+ domain = list_first_entry(&dca_domains, struct dca_domain, node);
+ if (!domain)
+ return;
+
+ list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
+ list_del(&dca->node);
+ list_add(&dca->node, &unregistered_providers);
+ }
+
+ dca_free_domain(domain);
+
+ spin_unlock_irqrestore(&dca_lock, flags);
+
+ list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+ dca_sysfs_remove_provider(dca);
+ list_del(&dca->node);
+ }
+}
+
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
struct dca_domain *domain;
@@ -90,9 +148,13 @@
domain = dca_find_domain(rc);
if (!domain) {
- domain = dca_allocate_domain(rc);
- if (domain)
- list_add(&domain->node, &dca_domains);
+ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+ dca_providers_blocked = 1;
+ } else {
+ domain = dca_allocate_domain(rc);
+ if (domain)
+ list_add(&domain->node, &dca_domains);
+ }
}
return domain;
@@ -293,8 +355,6 @@
}
EXPORT_SYMBOL_GPL(free_dca_provider);
-static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
-
/**
* register_dca_provider - register a dca provider
* @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@
unsigned long flags;
struct dca_domain *domain;
+ spin_lock_irqsave(&dca_lock, flags);
+ if (dca_providers_blocked) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ spin_unlock_irqrestore(&dca_lock, flags);
+
err = dca_sysfs_add_provider(dca, dev);
if (err)
return err;
@@ -313,7 +380,13 @@
spin_lock_irqsave(&dca_lock, flags);
domain = dca_get_domain(dev);
if (!domain) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ if (dca_providers_blocked) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ dca_sysfs_remove_provider(dca);
+ unregister_dca_providers();
+ } else {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ }
return -ENODEV;
}
list_add(&dca->node, &domain->dca_providers);
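unregister_dca_providers() above follows a common pattern: detach the providers onto a private list while holding the spinlock, then drop the lock before the slower sysfs teardown. A toy version of that pattern using a pthread mutex and a plain singly linked list instead of the kernel primitives:

/* Sketch of "collect under the lock, clean up outside it". */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct provider { int id; struct provider *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct provider *providers;        /* protected by lock */

static void slow_teardown(struct provider *p)
{
    /* stands in for dca_sysfs_remove_provider(): must not hold the lock */
    printf("tearing down provider %d\n", p->id);
    free(p);
}

static void unregister_all(void)
{
    struct provider *unregistered, *p;

    pthread_mutex_lock(&lock);
    unregistered = providers;             /* detach the whole list */
    providers = NULL;
    pthread_mutex_unlock(&lock);

    while ((p = unregistered)) {          /* heavy work with the lock dropped */
        unregistered = p->next;
        slow_teardown(p);
    }
}

int main(void)
{
    int i;

    for (i = 0; i < 3; i++) {
        struct provider *p = malloc(sizeof(*p));
        p->id = i;
        p->next = providers;
        providers = p;
    }
    unregister_all();
    return 0;
}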
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 86c5ae9..411d5bf 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
- u32 val = (1 << (1 + (chan->idx * 16)));
+ u32 val = ~(1 << (chan->idx * 16));
dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
__raw_writel(val, XOR_INTR_CAUSE(chan));
}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3630308..6b21e25 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -339,6 +339,9 @@
{
int status;
+ if (mci->op_state != OP_RUNNING_POLL)
+ return;
+
status = cancel_delayed_work(&mci->work);
if (status == 0) {
debugf0("%s() not canceled, flush the queue\n",
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index be29b0b..1b05896 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -263,6 +263,7 @@
{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
};
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index b42f42c..823559a 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -459,16 +459,32 @@
return err;
}
+static int sx150x_reset(struct sx150x_chip *chip)
+{
+ int err;
+
+ err = i2c_smbus_write_byte_data(chip->client,
+ chip->dev_cfg->reg_reset,
+ 0x12);
+ if (err < 0)
+ return err;
+
+ err = i2c_smbus_write_byte_data(chip->client,
+ chip->dev_cfg->reg_reset,
+ 0x34);
+ return err;
+}
+
static int sx150x_init_hw(struct sx150x_chip *chip,
struct sx150x_platform_data *pdata)
{
int err = 0;
- err = i2c_smbus_write_word_data(chip->client,
- chip->dev_cfg->reg_reset,
- 0x3412);
- if (err < 0)
- return err;
+ if (pdata->reset_during_probe) {
+ err = sx150x_reset(chip);
+ if (err < 0)
+ return err;
+ }
err = sx150x_i2c_write(chip->client,
chip->dev_cfg->reg_misc,
diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c
index 1be6288..7e10c93 100644
--- a/drivers/gpio/tc35892-gpio.c
+++ b/drivers/gpio/tc35892-gpio.c
@@ -322,6 +322,9 @@
goto out_freeirq;
}
+ if (pdata->setup)
+ pdata->setup(tc35892, tc35892_gpio->chip.base);
+
platform_set_drvdata(pdev, tc35892_gpio);
return 0;
@@ -338,9 +341,14 @@
static int __devexit tc35892_gpio_remove(struct platform_device *pdev)
{
struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ struct tc35892_gpio_platform_data *pdata = tc35892->pdata->gpio;
int irq = platform_get_irq(pdev, 0);
int ret;
+ if (pdata->remove)
+ pdata->remove(tc35892, tc35892_gpio->chip.base);
+
ret = gpiochip_remove(&tc35892_gpio->chip);
if (ret < 0) {
dev_err(tc35892_gpio->dev,
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed..529a0db 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@
* user_data: A pointer the data that is copied to the buffer.
* size: The Number of bytes to copy.
*/
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
- void __user *user_data, int size)
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size)
{
int nr_pages = size / PAGE_SIZE + 1;
int idx;
@@ -163,7 +163,7 @@
{
int idx = drm_buffer_index(buf);
int page = drm_buffer_page(buf);
- void *obj = 0;
+ void *obj = NULL;
if (idx + objsize <= PAGE_SIZE) {
obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d2ab01e..dcbeb98 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -103,8 +103,8 @@
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
- connector->status = connector->funcs->detect(connector);
- drm_helper_hpd_irq_event(dev);
+ connector->status = connector->funcs->detect(connector, true);
+ drm_kms_helper_poll_enable(dev);
}
if (connector->status == connector_status_disconnected) {
@@ -637,13 +637,13 @@
mode_changed = true;
if (mode_changed) {
- old_fb = set->crtc->fb;
- set->crtc->fb = set->fb;
set->crtc->enabled = (set->mode != NULL);
if (set->mode != NULL) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
@@ -866,7 +866,7 @@
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- status = connector->funcs->detect(connector);
+ status = connector->funcs->detect(connector, false);
if (old_status != status)
changed = true;
}
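Many hunks in this series add a force flag to the connector ->detect() hook: user-initiated probes (sysfs, fbdev setup) pass true and may perform expensive or disruptive load detection, while the background poll path passes false and connectors such as intel_crt simply return their cached status. A toy model of that split, with stand-in types and status values rather than the DRM structures:

/* Toy model of the detect(connector, force) split introduced above. */
#include <stdbool.h>
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED, UNKNOWN };

struct connector {
    enum status cached;
    enum status (*detect)(struct connector *c, bool force);
};

static enum status crt_detect(struct connector *c, bool force)
{
    /* cheap checks (hotplug bits, DDC probe) would go here ... */
    if (!force)
        return c->cached;      /* polling path: don't disturb the hardware */
    /* forced path: expensive load detection is allowed */
    return CONNECTED;
}

int main(void)
{
    struct connector crt = { .cached = DISCONNECTED, .detect = crt_detect };

    printf("poll:  %d\n", crt.detect(&crt, false)); /* cached status */
    printf("probe: %d\n", crt.detect(&crt, true));  /* full detection */
    return 0;
}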
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e20f78b..f5bd9e5 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -164,6 +164,8 @@
dev->hose = pdev->sysdata;
#endif
+ mutex_lock(&drm_global_mutex);
+
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
@@ -199,6 +201,7 @@
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
+ mutex_unlock(&drm_global_mutex);
return 0;
err_g4:
@@ -210,6 +213,7 @@
pci_disable_device(pdev);
err_g1:
kfree(dev);
+ mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 460e9a3..92d1d0f 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -53,6 +53,8 @@
dev->platformdev = platdev;
dev->dev = &platdev->dev;
+ mutex_lock(&drm_global_mutex);
+
ret = drm_fill_in_dev(dev, NULL, driver);
if (ret) {
@@ -87,6 +89,8 @@
list_add_tail(&dev->driver_item, &driver->device_list);
+ mutex_unlock(&drm_global_mutex);
+
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary->index);
@@ -100,6 +104,7 @@
drm_put_minor(&dev->control);
err_g1:
kfree(dev);
+ mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_platform_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 86118a7..85da4c4 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -159,7 +159,7 @@
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
- status = connector->funcs->detect(connector);
+ status = connector->funcs->detect(connector, true);
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 216deb5..6dbe14c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -170,6 +170,7 @@
INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
+ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 16fca1d..bced9b2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2351,14 +2351,21 @@
reg->obj = obj;
- if (IS_GEN6(dev))
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
sandybridge_write_fence_reg(reg);
- else if (IS_I965G(dev))
+ break;
+ case 5:
+ case 4:
i965_write_fence_reg(reg);
- else if (IS_I9XX(dev))
+ break;
+ case 3:
i915_write_fence_reg(reg);
- else
+ break;
+ case 2:
i830_write_fence_reg(reg);
+ break;
+ }
trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
obj_priv->tiling_mode);
@@ -2381,22 +2388,26 @@
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg];
+ uint32_t fence_reg;
- if (IS_GEN6(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
(obj_priv->fence_reg * 8), 0);
- } else if (IS_I965G(dev)) {
+ break;
+ case 5:
+ case 4:
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- } else {
- uint32_t fence_reg;
-
- if (obj_priv->fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+ break;
+ case 3:
+ if (obj_priv->fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
else
- fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
- 8) * 4;
+ case 2:
+ fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
I915_WRITE(fence_reg, 0);
+ break;
}
reg->obj = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 72cae3c..e85246e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -79,6 +79,7 @@
struct list_head *unwind)
{
list_add(&obj_priv->evict_list, unwind);
+ drm_gem_object_reference(&obj_priv->base);
return drm_mm_scan_add_block(obj_priv->gtt_space);
}
@@ -165,6 +166,7 @@
list_for_each_entry(obj_priv, &unwind_list, evict_list) {
ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
BUG_ON(ret);
+ drm_gem_object_unreference(&obj_priv->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -181,18 +183,21 @@
* scanning, therefore store to be evicted objects on a
* temporary list. */
list_move(&obj_priv->evict_list, &eviction_list);
- }
+ } else
+ drm_gem_object_unreference(&obj_priv->base);
}
/* Unbinding will emit any required flushes */
list_for_each_entry_safe(obj_priv, tmp_obj_priv,
&eviction_list, evict_list) {
#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
+ DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
#endif
ret = i915_gem_object_unbind(&obj_priv->base);
if (ret)
return ret;
+
+ drm_gem_object_unreference(&obj_priv->base);
}
/* The just created free hole should be on the top of the free stack
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 59457e8..744225e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1350,17 +1350,25 @@
i915_seqno_passed(i915_get_gem_seqno(dev,
&dev_priv->render_ring),
i915_get_tail_request(dev)->seqno)) {
+ bool missed_wakeup = false;
+
dev_priv->hangcheck_count = 0;
/* Issue a wake-up to catch stuck h/w. */
- if (dev_priv->render_ring.waiting_gem_seqno |
- dev_priv->bsd_ring.waiting_gem_seqno) {
- DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
- if (dev_priv->render_ring.waiting_gem_seqno)
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
- if (dev_priv->bsd_ring.waiting_gem_seqno)
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ if (dev_priv->render_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ missed_wakeup = true;
}
+
+ if (dev_priv->bsd_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
+ if (missed_wakeup)
+ DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
return;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d094e91..4f5e155 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2206,9 +2206,17 @@
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_FBC_LP1_MASK (0xf<<20)
+#define WM1_LP_FBC_LP1_SHIFT 20
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
+#define WM2_LP_ILK 0x4510c
+#define WM2_LP_EN (1<<31)
+#define WM3_LP_ILK 0x45110
+#define WM3_LP_EN (1<<31)
+#define WM1S_LP_ILK 0x45120
+#define WM1S_LP_EN (1<<31)
/* Memory latency timer register */
#define MLTR_ILK 0x11222
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2c6b98f..31f0858 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -789,16 +789,25 @@
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
/* Fences */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- } else {
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-
+ break;
+ case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
}
return 0;
@@ -815,15 +824,24 @@
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Fences */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
}
i915_restore_display(dev);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4b77351..197d4f3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -188,7 +188,7 @@
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000, 1))
- DRM_ERROR("timed out waiting for FORCE_TRIGGER");
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
I915_WRITE(PCH_ADPA, temp);
@@ -245,7 +245,7 @@
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
1000, 1))
- DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
+ DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -400,7 +400,8 @@
return status;
}
-static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@
if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
+ if (!force)
+ return connector->status;
+
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
status = intel_crt_load_detect(encoder->crtc, intel_encoder);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 40cc5da..b5bf51a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2463,11 +2463,19 @@
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
+
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
return false;
}
+
+ /* XXX some encoders set the crtcinfo, others don't.
+ * Obviously we need some form of conflict resolution here...
+ */
+ if (adjusted_mode->crtc_htotal == 0)
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
return true;
}
@@ -2767,14 +2775,8 @@
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
wm_size = wm->max_wm;
- if (wm_size <= 0) {
+ if (wm_size <= 0)
wm_size = wm->default_wm;
- DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
- " entries required = %ld, available = %lu.\n",
- entries_required + wm->guard_size,
- wm->fifo_size);
- }
-
return wm_size;
}
@@ -3388,8 +3390,7 @@
reg_value = I915_READ(WM1_LP_ILK);
reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
WM1_LP_CURSOR_MASK);
- reg_value |= WM1_LP_SR_EN |
- (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
(sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
I915_WRITE(WM1_LP_ILK, reg_value);
@@ -5675,6 +5676,9 @@
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
}
/*
* Based on the document from hardware guys the following bits
@@ -5696,8 +5700,7 @@
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
- if (IS_GEN6(dev))
- return;
+ return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5758,11 +5761,9 @@
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
}
- } else {
+ } else
DRM_DEBUG_KMS("Failed to allocate render context."
- "Disable RC6\n");
- return;
- }
+ "Disable RC6\n");
}
if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 51d1429..1a51ee0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1386,7 +1386,7 @@
* \return false if DP port is disconnected.
*/
static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector)
+intel_dp_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a399f4b..7c9ec14 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -221,7 +221,8 @@
*
* Unimplemented.
*/
-static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ccd4c97..926934a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -139,7 +139,7 @@
}
static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector)
+intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b819c10..6ec39a8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -445,7 +445,8 @@
* connected and closed means disconnected. We also send hotplug events as
* needed, using lid status notification from the input layer.
*/
-static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@
* the LID nofication event.
*/
if (connector)
- connector->status = connector->funcs->detect(connector);
+ connector->status = connector->funcs->detect(connector,
+ false);
+
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
return NOTIFY_OK;
@@ -875,8 +878,6 @@
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
- if (IS_I965G(dev))
- intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e3b7a7e..ee73e42 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1417,7 +1417,7 @@
if (!analog_connector)
return false;
- if (analog_connector->funcs->detect(analog_connector) ==
+ if (analog_connector->funcs->detect(analog_connector, false) ==
connector_status_disconnected)
return false;
@@ -1486,7 +1486,8 @@
return status;
}
-static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
{
uint16_t response;
struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -2169,8 +2170,7 @@
return true;
err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
+ intel_sdvo_destroy(connector);
return false;
}
@@ -2242,8 +2242,7 @@
return true;
err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
+ intel_sdvo_destroy(connector);
return false;
}
@@ -2521,11 +2520,10 @@
uint16_t response;
} enhancements;
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- &enhancements, sizeof(enhancements)))
- return false;
-
+ enhancements.response = 0;
+ intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
if (enhancements.response == 0) {
DRM_DEBUG_KMS("No enhancement is supported\n");
return true;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c671f60..4a117e3 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1341,7 +1341,7 @@
* we have a pipe programmed in order to probe the TV.
*/
static enum drm_connector_status
-intel_tv_detect(struct drm_connector *connector)
+intel_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_display_mode mode;
struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1353,7 +1353,7 @@
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(intel_tv);
- } else {
+ } else if (force) {
struct drm_crtc *crtc;
int dpms_mode;
@@ -1364,10 +1364,9 @@
intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
} else
- type = -1;
- }
-
- intel_tv->type = type;
+ return connector_status_unknown;
+ } else
+ return connector->status;
if (type < 0)
return connector_status_disconnected;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a1473ff..fc73703 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -168,7 +168,7 @@
}
static enum drm_connector_status
-nouveau_connector_detect(struct drm_connector *connector)
+nouveau_connector_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -246,7 +246,7 @@
}
static enum drm_connector_status
-nouveau_connector_detect_lvds(struct drm_connector *connector)
+nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -267,7 +267,7 @@
/* Try retrieving EDID via DDC */
if (!dev_priv->vbios.fp_no_ddc) {
- status = nouveau_connector_detect(connector);
+ status = nouveau_connector_detect(connector, force);
if (status == connector_status_connected)
goto out;
}
@@ -558,8 +558,10 @@
if (nv_encoder->dcb->type == OUTPUT_LVDS &&
(nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
- nv_connector->native_mode = drm_mode_create(dev);
- nouveau_bios_fp_mode(dev, nv_connector->native_mode);
+ struct drm_display_mode mode;
+
+ nouveau_bios_fp_mode(dev, &mode);
+ nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
/* Find the native mode if this is a digital panel, if we didn't
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3..fe359a2 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@
#define SW_I2C_CNTL_WRITE1BIT 6
//==============================VESA definition Portion===============================
-#define VESA_OEM_PRODUCT_REV '01.00'
+#define VESA_OEM_PRODUCT_REV "01.00"
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 464a81a..cd0290f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -539,14 +539,15 @@
pll->algo = PLL_ALGO_LEGACY;
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
}
- /* There is some evidence (often anecdotal) that RV515 LVDS
+ /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
* (on some boards at least) prefers the legacy algo. I'm not
* sure whether this should handled generically or on a
* case-by-case quirk basis. Both algos should work fine in the
* majority of cases.
*/
if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
- (rdev->family == CHIP_RV515)) {
+ ((rdev->family == CHIP_RV515) ||
+ (rdev->family == CHIP_RV620))) {
/* allow the user to overrride just in case */
if (radeon_new_pll == 1)
pll->algo = PLL_ALGO_NEW;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b8b7f01..79082d4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1160,14 +1160,25 @@
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
- } else
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
+ } else {
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ gb_backend_map = 0x66442200;
+ break;
+ case CHIP_JUNIPER:
+ gb_backend_map = 0x00006420;
+ break;
+ default:
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ }
+ }
rdev->config.evergreen.tile_config = gb_addr_config;
WREG32(GB_BACKEND_MAP, gb_backend_map);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e817a0b..e151f16 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2020,18 +2020,7 @@
return false;
}
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
- if (elapsed >= 3000) {
- /* very likely the improbable case where current
- * rptr is equal to last recorded, a while ago, rptr
- * this is more likely a false positive update tracking
- * information which should force us to be recall at
- * latter point
- */
- lockup->last_cp_rptr = cp->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- if (elapsed >= 1000) {
+ if (elapsed >= 10000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
@@ -3308,13 +3297,14 @@
unsigned long size;
unsigned prim_walk;
unsigned nverts;
+ unsigned num_cb = track->num_cb;
- for (i = 0; i < track->num_cb; i++) {
+ if (!track->zb_cb_clear && !track->color_channel_mask &&
+ !track->blend_read_enable)
+ num_cb = 0;
+
+ for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
- if (!(track->zb_cb_clear || track->color_channel_mask ||
- track->blend_read_enable)) {
- continue;
- }
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index afc18d8..ddc3ade 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2729,7 +2729,7 @@
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
- DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index d13622a..9ceb2a1 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index fdc3b37..f437d36 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -1,3 +1,27 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
#ifndef R600_BLIT_SHADERS_H
#define R600_BLIT_SHADERS_H
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d886494..250a3a9 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1170,9 +1170,8 @@
/* using get ib will give us the offset into the mipmap bo */
word0 = radeon_get_ib_value(p, idx + 3) << 8;
if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
- dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));
- return -EINVAL;
+ /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index bd74e42..a04b7a6 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1485,6 +1485,11 @@
/* PowerMac8,1 ? */
/* imac g5 isight */
rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+ } else if ((rdev->pdev->device == 0x4a48) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4a48)) {
+ /* Mac X800 */
+ rdev->mode_info.connector_table = CT_MAC_X800;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
@@ -1961,6 +1966,48 @@
CONNECTOR_OBJECT_ID_VGA,
&hpd);
break;
+ case CT_MAC_X800:
+ DRM_INFO("Connector Table: %d (mac x800)\n",
+ rdev->mode_info.connector_table);
+ /* DVI - primary dac, internal tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* DVI - tv dac, dvo */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index a9dd784..ecc1a8f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -481,7 +481,8 @@
return MODE_OK;
}
-static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@
return MODE_OK;
}
-static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
@@ -691,7 +693,8 @@
return MODE_OK;
}
-static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@
* we have to check if this analog encoder is shared with anyone else (TV)
* if its shared we have to set the other connector to disconnected.
*/
-static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
@@ -972,7 +976,8 @@
return ret;
}
-static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6dd434a..127a395f 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1140,17 +1140,18 @@
radeon_crtc->rmx_type = radeon_encoder->rmx_type;
else
radeon_crtc->rmx_type = RMX_OFF;
- src_v = crtc->mode.vdisplay;
- dst_v = radeon_crtc->native_mode.vdisplay;
- src_h = crtc->mode.hdisplay;
- dst_h = radeon_crtc->native_mode.vdisplay;
/* copy native mode */
memcpy(&radeon_crtc->native_mode,
&radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
+ src_v = crtc->mode.vdisplay;
+ dst_v = radeon_crtc->native_mode.vdisplay;
+ src_h = crtc->mode.hdisplay;
+ dst_h = radeon_crtc->native_mode.hdisplay;
/* fix up for overscan on hdmi */
if (ASIC_IS_AVIVO(rdev) &&
+ (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
drm_detect_hdmi_monitor(radeon_connector->edid) &&
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5eee3c4..8fbbe1c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -203,6 +203,10 @@
*/
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev->powered_down)
+ return -EINVAL;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index efbe975..17a6602 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -204,7 +204,7 @@
/* mostly for macs, but really any system without connector tables */
enum radeon_connector_table {
- CT_NONE,
+ CT_NONE = 0,
CT_GENERIC,
CT_IBOOK,
CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@
CT_IMAC_G5_ISIGHT,
CT_EMAC,
CT_RN50_POWER,
+ CT_MAC_X800,
};
enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e..3451a82 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,6 +351,7 @@
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
fbo->vm_node = NULL;
+ atomic_set(&fbo->cpu_writers, 0);
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
kref_init(&fbo->list_kref);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca90479..b1e02ff 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@
spinlock_t lock;
bool fill_lock;
struct list_head list;
- int gfp_flags;
+ gfp_t gfp_flags;
unsigned npages;
char *name;
unsigned long nfrees;
@@ -475,7 +475,7 @@
* This function is reentrant if caller updates count depending on number of
* pages returned in pages array.
*/
-static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
struct page **caching_array;
@@ -666,7 +666,7 @@
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
- int gfp_flags = GFP_USER;
+ gfp_t gfp_flags = GFP_USER;
int r;
/* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@
return 0;
}
-void ttm_page_alloc_fini()
+void ttm_page_alloc_fini(void)
{
int i;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 2ff5cf7..7083b1a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -335,7 +335,8 @@
}
static enum drm_connector_status
- vmw_ldu_connector_detect(struct drm_connector *connector)
+ vmw_ldu_connector_detect(struct drm_connector *connector,
+ bool force)
{
if (vmw_connector_to_ldu(connector)->pref_active)
return connector_status_connected;
@@ -516,7 +517,7 @@
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- connector->status = vmw_ldu_connector_detect(connector);
+ connector->status = vmw_ldu_connector_detect(connector, true);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
@@ -610,7 +611,7 @@
ldu->pref_height = 600;
ldu->pref_active = false;
}
- con->status = vmw_ldu_connector_detect(con);
+ con->status = vmw_ldu_connector_detect(con, true);
}
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e..f366f96 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -598,7 +598,7 @@
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
-void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
{
struct vga_device *vgadev;
unsigned long flags;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c52899..3f72924 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,8 +1285,11 @@
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1578,7 +1581,6 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85c6d13..765a4f5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -105,6 +105,7 @@
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
+#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
@@ -128,6 +129,7 @@
#define USB_VENDOR_ID_BTC 0x046e
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
#define USB_VENDOR_ID_CANDO 0x2087
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -149,6 +151,7 @@
#define USB_VENDOR_ID_CHICONY 0x04f2
#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
+#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_VENDOR_ID_CIDC 0x1677
@@ -507,6 +510,7 @@
#define USB_VENDOR_ID_UCLOGIC 0x5543
#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
+#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
#define USB_VENDOR_ID_VERNIER 0x08f7
#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index e91437c..ac5421d 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -239,6 +239,7 @@
static const struct hid_device_id mosart_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
{ }
};
MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 5771f85..956ed9a 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -64,6 +64,7 @@
static const struct hid_device_id ts_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
{ }
};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b729c02..599041a 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -828,6 +828,7 @@
}
} else {
int skipped_report_id = 0;
+ int report_id = buf[0];
if (buf[0] == 0x0) {
/* Don't send the Report ID */
buf++;
@@ -837,7 +838,7 @@
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ((report_type + 1) << 8) | *buf,
+ ((report_type + 1) << 8) | report_id,
interface->desc.bInterfaceNumber, buf, count,
USB_CTRL_SET_TIMEOUT);
/* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@
{ }
};
+struct usb_interface *usbhid_find_interface(int minor)
+{
+ return usb_find_interface(&hid_driver, minor);
+}
+
static struct hid_driver hid_usb_driver = {
.name = "generic-usb",
.id_table = hid_usb_table,
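
The usbhid hunk above saves the report ID from buf[0] before the buffer pointer is advanced past it, so the later usb_control_msg() call no longer picks up a payload byte as the ID. A minimal standalone sketch of the pitfall and the fix; the buffer contents and helper below are made up for illustration and are not kernel code:

#include <stdio.h>

/* Build the wValue field for HID SET_REPORT: (type + 1) << 8 | report ID. */
static unsigned int set_report_wvalue(unsigned int report_type,
                                      const unsigned char *buf)
{
    unsigned char report_id = buf[0];   /* save before any pointer math */

    if (report_id == 0x0)
        buf++;                          /* unnumbered report: ID byte is not sent */

    /* After the increment, *buf already points at payload data, so the
     * old "(type + 1) << 8 | *buf" computed the wrong wValue; the saved
     * report_id stays correct in both cases. */
    return ((report_type + 1) << 8) | report_id;
}

int main(void)
{
    unsigned char numbered[]   = { 0x02, 0xaa, 0xbb };
    unsigned char unnumbered[] = { 0x00, 0xaa, 0xbb };

    printf("numbered:   0x%04x\n", set_report_wvalue(1, numbered));
    printf("unnumbered: 0x%04x\n", set_report_wvalue(1, unnumbered));
    return 0;
}
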
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2643d31..70da318 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@
{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +70,7 @@
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
@@ -77,6 +79,8 @@
{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+
{ 0, 0 }
};
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 0a29c51..681e620 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -270,7 +270,7 @@
struct hiddev *hiddev;
int res;
- intf = usb_find_interface(&hiddev_driver, iminor(inode));
+ intf = usbhid_find_interface(iminor(inode));
if (!intf)
return -ENODEV;
hid = usb_get_intfdata(intf);
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 693fd3e..89d2e84 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -42,6 +42,7 @@
(struct hid_device *hid, struct hid_report *report, unsigned char dir);
int usbhid_get_power(struct hid_device *hid);
void usbhid_put_power(struct hid_device *hid);
+struct usb_interface *usbhid_find_interface(int minor);
/* iofl flags */
#define HID_CTRL_RUNNING 1
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4d4d09b..97499d0 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -409,7 +409,7 @@
config SENSORS_PKGTEMP
tristate "Intel processor package temperature sensor"
- depends on X86 && PCI && EXPERIMENTAL
+ depends on X86 && EXPERIMENTAL
help
If you say yes here you get support for the package level temperature
sensor inside your CPU. Check documentation/driver for details.
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 15c1a96..0683e6b 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -79,7 +79,7 @@
int chip_type;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- unsigned int update_rate; /* In milliseconds */
+ unsigned int update_interval; /* In milliseconds */
/* The chan_select_table contains the possible configurations for
* auto fan control.
*/
@@ -743,23 +743,23 @@
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
-/* Update Rate */
-static const unsigned int update_rates[] = {
+/* Update Interval */
+static const unsigned int update_intervals[] = {
16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};
-static ssize_t show_update_rate(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_update_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
- return sprintf(buf, "%u\n", data->update_rate);
+ return sprintf(buf, "%u\n", data->update_interval);
}
-static ssize_t set_update_rate(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_update_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@
if (err)
return err;
- /* find the nearest update rate from the table */
- for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
- if (val >= update_rates[i])
+ /*
+ * Find the nearest update interval from the table.
+ * Use it to determine the matching update rate.
+ */
+ for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
+ if (val >= update_intervals[i])
break;
}
- /* if not found, we point to the last entry (lowest update rate) */
+ /* if not found, we point to the last entry (lowest update interval) */
/* set the new update rate while preserving other settings */
reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@
adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
mutex_lock(&data->update_lock);
- data->update_rate = update_rates[i];
+ data->update_interval = update_intervals[i];
mutex_unlock(&data->update_lock);
return count;
}
-static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
- set_update_rate);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+ set_update_interval);
static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@
&sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
- &dev_attr_update_rate.attr,
+ &dev_attr_update_interval.attr,
&dev_attr_alarms.attr,
NULL
@@ -981,7 +984,8 @@
mask = ADM1031_UPDATE_RATE_MASK;
read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
- data->update_rate = update_rates[i];
+ /* Save it as update interval */
+ data->update_interval = update_intervals[i];
}
static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@
mutex_lock(&data->update_lock);
- next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+ next_update = data->last_updated
+ + msecs_to_jiffies(data->update_interval);
if (time_after(jiffies, next_update) || !data->valid) {
dev_dbg(&client->dev, "Starting adm1031 update\n");
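
The adm1031 change above renames the sysfs attribute from update_rate to update_interval while keeping the same lookup: the table is sorted from longest to shortest interval and the loop stops at the first entry the requested value still covers, falling back to the last (shortest) entry. A standalone sketch of that lookup, reusing the same interval table; everything else here is illustrative rather than driver code:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Supported update intervals in milliseconds, longest first,
 * mirroring the table in adm1031.c. */
static const unsigned int update_intervals[] = {
    16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};

static unsigned int pick_interval(unsigned long requested_ms)
{
    unsigned int i;

    /* First entry that the request still covers; if the request is
     * shorter than all of them, i ends up at the last (125 ms) entry. */
    for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
        if (requested_ms >= update_intervals[i])
            break;
    }
    return update_intervals[i];
}

int main(void)
{
    unsigned long tests[] = { 20000, 3000, 600, 10 };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(tests); i++)
        printf("%5lu ms -> %u ms\n", tests[i], pick_interval(tests[i]));
    return 0;
}
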
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de81111..a23b17a 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
+#include <asm/smp.h>
#define DRVNAME "coretemp"
@@ -423,9 +424,18 @@
int err;
struct platform_device *pdev;
struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+ /*
+ * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+ * sensors. We check only this bit; all the early CPUs
+ * without thermal sensors will be filtered out.
+ */
+ if (!cpu_has(c, X86_FEATURE_DTS)) {
+ printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+ " has no thermal sensor.\n", c->x86_model);
+ return 0;
+ }
mutex_lock(&pdev_list_mutex);
@@ -482,14 +492,22 @@
static void coretemp_device_remove(unsigned int cpu)
{
- struct pdev_entry *p, *n;
+ struct pdev_entry *p;
+ unsigned int i;
+
mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- if (p->cpu == cpu) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
+ list_for_each_entry(p, &pdev_list, list) {
+ if (p->cpu != cpu)
+ continue;
+
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ mutex_unlock(&pdev_list_mutex);
+ kfree(p);
+ for_each_cpu(i, cpu_sibling_mask(cpu))
+ if (i != cpu && !coretemp_device_add(i))
+ break;
+ return;
}
mutex_unlock(&pdev_list_mutex);
}
@@ -527,30 +545,21 @@
if (err)
goto exit;
- for_each_online_cpu(i) {
- struct cpuinfo_x86 *c = &cpu_data(i);
- /*
- * CPUID.06H.EAX[0] indicates whether the CPU has thermal
- * sensors. We check this bit only, all the early CPUs
- * without thermal sensors will be filtered out.
- */
- if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
- coretemp_device_add(i);
- else {
- printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
- " has no thermal sensor.\n", c->x86_model);
- }
- }
+ for_each_online_cpu(i)
+ coretemp_device_add(i);
+
+#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
err = -ENODEV;
goto exit_driver_unreg;
}
+#endif
register_hotcpu_notifier(&coretemp_cpu_notifier);
return 0;
-exit_driver_unreg:
#ifndef CONFIG_HOTPLUG_CPU
+exit_driver_unreg:
platform_driver_unregister(&coretemp_driver);
#endif
exit:
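
coretemp now gates per-CPU device creation on X86_FEATURE_DTS instead of re-reading CPUID in the init loop; the feature bit corresponds to CPUID leaf 06H, EAX bit 0 (digital thermal sensor). As a hedged illustration only, the same bit can be inspected from userspace with GCC/Clang's <cpuid.h>; this is not kernel code and is only meaningful on x86:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 0x06: thermal and power management. EAX bit 0 is the
     * digital temperature sensor (DTS) flag coretemp relies on. */
    if (!__get_cpuid(0x06, &eax, &ebx, &ecx, &edx)) {
        fprintf(stderr, "CPUID leaf 0x06 not supported\n");
        return 1;
    }

    printf("digital thermal sensor: %s\n",
           (eax & 0x1) ? "present" : "absent");
    return 0;
}
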
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5b58b20..8dee3f3 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -308,7 +308,6 @@
res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
if (res) {
dev_warn(&client->dev, "create group failed\n");
- hwmon_device_unregister(data->hwmon_dev);
goto thermal_error1;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 0f58ecc..9638d58 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@
#define F75375_REG_PWM2_DROP_DUTY 0x6C
#define FAN_CTRL_LINEAR(nr) (4 + nr)
-#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
/*
* Data structures and manipulation thereof
@@ -298,7 +298,7 @@
return -EINVAL;
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
- fanmode = ~(3 << FAN_CTRL_MODE(nr));
+ fanmode &= ~(3 << FAN_CTRL_MODE(nr));
switch (val) {
case 0: /* Full speed */
@@ -350,7 +350,7 @@
mutex_lock(&data->update_lock);
conf = f75375_read8(client, F75375_REG_CONFIG1);
- conf = ~(1 << FAN_CTRL_LINEAR(nr));
+ conf &= ~(1 << FAN_CTRL_LINEAR(nr));
if (val == 0)
conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
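
The f75375s hunks above fix a classic read-modify-write mistake: "fanmode = ~(3 << FAN_CTRL_MODE(nr))" throws away the value just read from the register, whereas "fanmode &= ~(...)" clears only the two mode bits and preserves everything else. A small standalone demonstration of the difference; the register value and field position are made up:

#include <stdio.h>

int main(void)
{
    unsigned char reg = 0xd6;   /* pretend value read back from the chip */
    int shift = 4;              /* pretend 2-bit mode field at bits 5:4 */

    unsigned char buggy = ~(3 << shift);         /* discards the read value */
    unsigned char fixed = reg & ~(3 << shift);   /* clears only the field */

    printf("read  : 0x%02x\n", reg);
    printf("buggy : 0x%02x (original register contents lost)\n", buggy);
    printf("fixed : 0x%02x (only bits 5:4 cleared)\n", fixed);
    return 0;
}
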
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 7580f55..36e9575 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -221,6 +221,8 @@
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+ AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+ AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 6138f03..fc591ae 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -277,7 +277,7 @@
wake_up_interruptible(&lis3_dev.misc_wait);
kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
out:
- if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+ if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
lis3_dev.idev->input->users)
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
@@ -718,7 +718,7 @@
* io-apic is not configurable (and generates a warning) but I keep it
* in case of support for other hardware.
*/
- if (dev->whoami == WAI_8B)
+ if (dev->pdata && dev->whoami == WAI_8B)
thread_fn = lis302dl_interrupt_thread1_8b;
else
thread_fn = NULL;
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index dc1f540..8e5933b 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -121,7 +121,7 @@
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweroff(lis3);
return 0;
}
@@ -130,7 +130,7 @@
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweron(lis3);
return 0;
}
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 82b1680..b9be5e3 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -92,7 +92,7 @@
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweroff(&lis3_dev);
return 0;
@@ -102,7 +102,7 @@
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweron(lis3);
return 0;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 94741d4..464340f 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -91,7 +91,7 @@
struct lm95241_data {
struct device *hwmon_dev;
struct mutex update_lock;
- unsigned long last_updated, rate; /* in jiffies */
+ unsigned long last_updated, interval; /* in jiffies */
char valid; /* zero until following fields are valid */
/* registers values */
u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@
show_temp(remote1);
show_temp(remote2);
-static ssize_t show_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm95241_data *data = lm95241_update_device(dev);
- snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ);
+ snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
return strlen(buf);
}
-static ssize_t set_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm95241_data *data = i2c_get_clientdata(client);
- strict_strtol(buf, 10, &data->rate);
- data->rate = data->rate * HZ / 1000;
+ strict_strtol(buf, 10, &data->interval);
+ data->interval = data->interval * HZ / 1000;
return count;
}
@@ -286,7 +286,8 @@
static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
-static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate);
+static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
+ set_interval);
static struct attribute *lm95241_attributes[] = {
&dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@
&dev_attr_temp3_min.attr,
&dev_attr_temp2_max.attr,
&dev_attr_temp3_max.attr,
- &dev_attr_rate.attr,
+ &dev_attr_update_interval.attr,
NULL
};
@@ -376,7 +377,7 @@
{
struct lm95241_data *data = i2c_get_clientdata(client);
- data->rate = HZ; /* 1 sec default */
+ data->interval = HZ; /* 1 sec default */
data->valid = 0;
data->config = CFG_CR0076;
data->model = 0;
@@ -410,7 +411,7 @@
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + data->rate) ||
+ if (time_after(jiffies, data->last_updated + data->interval) ||
!data->valid) {
dev_dbg(&client->dev, "Updating lm95241 data.\n");
data->local_h =
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 74157fc..f119039 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -33,7 +33,6 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
-#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -224,7 +223,7 @@
err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
if (err)
- goto exit_free;
+ goto exit_dev;
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@
exit_class:
sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+exit_dev:
+ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
exit_free:
kfree(data);
exit:
@@ -250,6 +251,7 @@
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
@@ -281,9 +283,10 @@
int err;
struct platform_device *pdev;
struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+ if (!cpu_has(c, X86_FEATURE_PTS))
+ return 0;
mutex_lock(&pdev_list_mutex);
@@ -339,17 +342,18 @@
#ifdef CONFIG_HOTPLUG_CPU
static void pkgtemp_device_remove(unsigned int cpu)
{
- struct pdev_entry *p, *n;
+ struct pdev_entry *p;
unsigned int i;
int err;
mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
+ list_for_each_entry(p, &pdev_list, list) {
if (p->cpu != cpu)
continue;
platform_device_unregister(p->pdev);
list_del(&p->list);
+ mutex_unlock(&pdev_list_mutex);
kfree(p);
for_each_cpu(i, cpu_core_mask(cpu)) {
if (i != cpu) {
@@ -358,7 +362,7 @@
break;
}
}
- break;
+ return;
}
mutex_unlock(&pdev_list_mutex);
}
@@ -399,11 +403,6 @@
goto exit;
for_each_online_cpu(i) {
- struct cpuinfo_x86 *c = &cpu_data(i);
-
- if (!cpu_has(c, X86_FEATURE_PTS))
- continue;
-
err = pkgtemp_device_add(i);
if (err)
goto exit_devices_unreg;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index e96e69d..072c580 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -127,6 +127,7 @@
static inline void
superio_exit(int ioreg)
{
+ outb(0xaa, ioreg);
outb(0x02, ioreg);
outb(0x02, ioreg + 1);
}
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7674efb..b33c785 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -680,6 +680,8 @@
if (r == 0)
r = num;
+
+ omap_i2c_wait_for_bb(dev);
out:
omap_i2c_idle(dev);
return r;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4c3d1bfe..068cef0 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1448,19 +1448,13 @@
if (hwif == NULL)
continue;
- if (hwif->present)
- hwif_register_devices(hwif);
- }
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
ide_sysfs_register_port(hwif);
ide_proc_register_port(hwif);
- if (hwif->present)
+ if (hwif->present) {
ide_proc_port_register_devices(hwif);
+ hwif_register_devices(hwif);
+ }
}
return j ? 0 : -1;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 8f0caf7..78fbe9f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
#define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d88077a..13c8887 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -463,7 +463,8 @@
V_MSS_IDX(mtu_idx) |
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+ V_CONG_CONTROL_FLAVOR(cong_flavor);
skb->priority = CPL_PRIORITY_SETUP;
set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -1280,7 +1281,8 @@
V_MSS_IDX(mtu_idx) |
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+ V_CONG_CONTROL_FLAVOR(cong_flavor);
rpl = cplhdr(skb);
rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 443cea5..61e0efd 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -502,7 +502,9 @@
static void nes_retrans_expired(struct nes_cm_node *cm_node)
{
struct iw_cm_id *cm_id = cm_node->cm_id;
- switch (cm_node->state) {
+ enum nes_cm_node_state state = cm_node->state;
+ cm_node->state = NES_CM_STATE_CLOSED;
+ switch (state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_CLOSING:
rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@
case NES_CM_STATE_FIN_WAIT1:
if (cm_node->cm_id)
cm_id->rem_ref(cm_id);
- cm_node->state = NES_CM_STATE_CLOSED;
send_reset(cm_node, NULL);
break;
default:
@@ -1439,9 +1440,6 @@
break;
case NES_CM_STATE_MPAREQ_RCVD:
passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT)
- create_event(cm_node, NES_CM_EVENT_RESET);
- cm_node->state = NES_CM_STATE_CLOSED;
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@
case NES_CM_STATE_CLOSED:
drop_packet(skb);
break;
+ case NES_CM_STATE_FIN_WAIT2:
case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@
return -EINVAL;
}
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == NES_SEND_RESET_EVENT) {
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ return -ECONNRESET;
+ }
+
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
"ret=%d\n", __func__, __LINE__, ret);
- passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT)
- create_event(cm_node, NES_CM_EVENT_RESET);
return 0;
}
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index f8233c8..1980a46 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3468,6 +3468,19 @@
return; /* Ignore it, wait for close complete */
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+ if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+ (nesqp->ibqp_state == IB_QPS_RTS) &&
+ ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ nes_cm_disconn(nesqp);
+ }
nesqp->cm_id->add_ref(nesqp->cm_id);
schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
async_event_id, nesqp->last_aeq, tcp_state);
}
-
break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
if (nesqp->term_flags) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index aa9183d..1204c34 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -45,6 +45,7 @@
#define NES_PHY_TYPE_KR 9
#define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
enum pci_regs {
NES_INT_STAT = 0x0000,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 6dfdd49..10560c7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1446,14 +1446,14 @@
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 0;
} else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 1;
}
if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a9b025f..ab69820 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1599,11 +1599,14 @@
* @dev: input device supporting MT events and finger tracking
* @num_slots: number of slots used by the device
*
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
*/
int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
{
+ int i;
+
if (!num_slots)
return 0;
@@ -1614,6 +1617,10 @@
dev->mtsize = num_slots;
input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
+ /* Mark slots as 'unused' */
+ for (i = 0; i < num_slots; i++)
+ dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
return 0;
}
EXPORT_SYMBOL(input_mt_create_slots);
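
The input core change above initializes every MT slot's ABS_MT_TRACKING_ID to -1 so that freshly created slots appear unused before the first touch arrives. A minimal model of the slot/tracking-id convention in plain C; the struct and helpers are invented for illustration and are not the kernel's input API:

#include <stdio.h>

#define NUM_SLOTS 4
#define UNUSED   (-1)

/* Toy model of per-slot state: in the MT slots protocol a slot is
 * "in use" exactly while its tracking id is >= 0. */
struct mt_slot {
    int tracking_id;
    int x, y;
};

static void mt_init(struct mt_slot *slots, int n)
{
    for (int i = 0; i < n; i++)
        slots[i].tracking_id = UNUSED;   /* what the patch above does */
}

static void report(const struct mt_slot *slots, int n)
{
    for (int i = 0; i < n; i++) {
        if (slots[i].tracking_id == UNUSED)
            printf("slot %d: unused\n", i);
        else
            printf("slot %d: id %d at (%d,%d)\n",
                   i, slots[i].tracking_id, slots[i].x, slots[i].y);
    }
}

int main(void)
{
    struct mt_slot slots[NUM_SLOTS];

    mt_init(slots, NUM_SLOTS);

    /* First contact appears in slot 1 and gets tracking id 7. */
    slots[1] = (struct mt_slot){ .tracking_id = 7, .x = 120, .y = 340 };
    report(slots, NUM_SLOTS);

    /* Contact lifts: the slot is released by writing -1 again. */
    slots[1].tracking_id = UNUSED;
    report(slots, NUM_SLOTS);
    return 0;
}
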
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ea67c49..b952317 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -337,10 +337,14 @@
const struct bcm5974_config *cfg,
const struct tp_finger *f)
{
- input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
- input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
- input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
- input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ raw2int(f->force_major) << 1);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ raw2int(f->force_minor) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+ raw2int(f->size_major) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MINOR,
+ raw2int(f->size_minor) << 1);
input_report_abs(input, ABS_MT_ORIENTATION,
MAX_FINGER_ORIENTATION - raw2int(f->orientation));
input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 46e4ba0..f585131 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1485,8 +1485,8 @@
static void __exit i8042_exit(void)
{
- platform_driver_unregister(&i8042_driver);
platform_device_unregister(i8042_platform_device);
+ platform_driver_unregister(&i8042_driver);
i8042_platform_exit();
panic_blink = NULL;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 40d77ba..6e29bad 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -243,10 +243,10 @@
if (features->type == WACOM_G4 ||
features->type == WACOM_MO) {
input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
- rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+ rw = (data[7] & 0x04) - (data[7] & 0x03);
} else {
input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
- rw = -(signed)data[6];
+ rw = -(signed char)data[6];
}
input_report_rel(input, REL_WHEEL, rw);
}
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 74dce4b..350eb34 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -81,7 +81,7 @@
int cmd_level;
int slow_level;
- read_lock(&led_dat->rw_lock);
+ read_lock_irq(&led_dat->rw_lock);
cmd_level = gpio_get_value(led_dat->cmd);
slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@
}
}
- read_unlock(&led_dat->rw_lock);
+ read_unlock_irq(&led_dat->rw_lock);
return ret;
}
@@ -104,8 +104,9 @@
enum ns2_led_modes mode)
{
int i;
+ unsigned long flags;
- write_lock(&led_dat->rw_lock);
+ write_lock_irqsave(&led_dat->rw_lock, flags);
for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@
}
}
- write_unlock(&led_dat->rw_lock);
+ write_unlock_irqrestore(&led_dat->rw_lock, flags);
}
static void ns2_led_set(struct led_classdev *led_cdev,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 43cf9cc..f20d13e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1643,7 +1643,9 @@
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
- }
+ } else
+ max_dev = le32_to_cpu(sb->max_dev);
+
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -7069,7 +7071,7 @@
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags && !mddev->external) ||
+ (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0b591b6..b743312 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -368,7 +368,7 @@
If unsure, say N.
To compile this driver as a module, choose M here: the
- module will be called vmware_balloon.
+ module will be called vmw_balloon.
config ARM_CHARLCD
bool "ARM Ltd. Character LCD Driver"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 255a80d..42eab95 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,5 +33,5 @@
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
-obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o
+obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c
similarity index 100%
rename from drivers/misc/vmware_balloon.c
rename to drivers/misc/vmw_balloon.c
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd2755e..f332c52 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -362,9 +362,8 @@
goto err;
}
- err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
-
- if (!err) {
+ if (ocr & R4_MEMORY_PRESENT
+ && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
card->type = MMC_TYPE_SD_COMBO;
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 5f3a599..87226cd 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -66,6 +66,7 @@
#include <linux/clk.h>
#include <linux/atmel_pdc.h>
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 9a68ff4..5a950b1 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -148,11 +148,12 @@
while (delay--) {
reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN)
+ if (reg & STATUS_CARD_BUS_CLK_RUN) {
/* Check twice before cut */
reg = readw(host->base + MMC_REG_STATUS);
if (reg & STATUS_CARD_BUS_CLK_RUN)
return 0;
+ }
if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
return 0;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 840b301..f2e02d7 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -41,23 +41,35 @@
* @clkreg: default value for MCICLOCK register
* @clkreg_enable: enable value for MMCICLOCK register
* @datalength_bits: number of bits in the MMCIDATALENGTH register
+ * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
+ * is asserted (likewise for RX)
+ * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
+ * is asserted (likewise for RX)
*/
struct variant_data {
unsigned int clkreg;
unsigned int clkreg_enable;
unsigned int datalength_bits;
+ unsigned int fifosize;
+ unsigned int fifohalfsize;
};
static struct variant_data variant_arm = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
.datalength_bits = 16,
};
static struct variant_data variant_u300 = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
.clkreg_enable = 1 << 13, /* HWFCEN */
.datalength_bits = 16,
};
static struct variant_data variant_ux500 = {
+ .fifosize = 30 * 4,
+ .fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = 1 << 14, /* HWFCEN */
.datalength_bits = 24,
@@ -138,6 +150,7 @@
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
unsigned int datactrl, timeout, irqmask;
unsigned long long clks;
void __iomem *base;
@@ -173,7 +186,7 @@
* If we have less than a FIFOSIZE of bytes to transfer,
* trigger a PIO interrupt as soon as any data is available.
*/
- if (host->size < MCI_FIFOSIZE)
+ if (host->size < variant->fifosize)
irqmask |= MCI_RXDATAAVLBLMASK;
} else {
/*
@@ -332,13 +345,15 @@
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
+ struct variant_data *variant = host->variant;
void __iomem *base = host->base;
char *ptr = buffer;
do {
unsigned int count, maxcnt;
- maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
+ maxcnt = status & MCI_TXFIFOEMPTY ?
+ variant->fifosize : variant->fifohalfsize;
count = min(remain, maxcnt);
writesl(base + MMCIFIFO, ptr, count >> 2);
@@ -362,6 +377,7 @@
{
struct mmci_host *host = dev_id;
struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ struct variant_data *variant = host->variant;
void __iomem *base = host->base;
unsigned long flags;
u32 status;
@@ -420,7 +436,7 @@
* If we're nearing the end of the read, switch to
* "any data available" mode.
*/
- if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
+ if (status & MCI_RXACTIVE && host->size < variant->fifosize)
writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
/*
@@ -564,18 +580,23 @@
if (host->gpio_wp == -ENOSYS)
return -ENOSYS;
- return gpio_get_value(host->gpio_wp);
+ return gpio_get_value_cansleep(host->gpio_wp);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
+ struct mmci_platform_data *plat = host->plat;
unsigned int status;
- if (host->gpio_cd == -ENOSYS)
- status = host->plat->status(mmc_dev(host->mmc));
- else
- status = !gpio_get_value(host->gpio_cd);
+ if (host->gpio_cd == -ENOSYS) {
+ if (!plat->status)
+ return 1; /* Assume always present */
+
+ status = plat->status(mmc_dev(host->mmc));
+ } else
+ status = !!gpio_get_value_cansleep(host->gpio_cd)
+ ^ plat->cd_invert;
/*
* Use positive logic throughout - status is zero for no card,
@@ -584,6 +605,15 @@
return status;
}
+static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+
+ mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+
+ return IRQ_HANDLED;
+}
+
static const struct mmc_host_ops mmci_ops = {
.request = mmci_request,
.set_ios = mmci_set_ios,
@@ -620,6 +650,7 @@
host->gpio_wp = -ENOSYS;
host->gpio_cd = -ENOSYS;
+ host->gpio_cd_irq = -1;
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
@@ -699,7 +730,6 @@
if (host->vcc == NULL)
mmc->ocr_avail = plat->ocr_mask;
mmc->caps = plat->capabilities;
- mmc->caps |= MMC_CAP_NEEDS_POLL;
/*
* We can do SGIO
@@ -744,6 +774,12 @@
host->gpio_cd = plat->gpio_cd;
else if (ret != -ENOSYS)
goto err_gpio_cd;
+
+ ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
+ mmci_cd_irq, 0,
+ DRIVER_NAME " (cd)", host);
+ if (ret >= 0)
+ host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
}
if (gpio_is_valid(plat->gpio_wp)) {
ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
@@ -755,6 +791,10 @@
goto err_gpio_wp;
}
+ if ((host->plat->status || host->gpio_cd != -ENOSYS)
+ && host->gpio_cd_irq < 0)
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
if (ret)
goto unmap;
@@ -781,6 +821,8 @@
if (host->gpio_wp != -ENOSYS)
gpio_free(host->gpio_wp);
err_gpio_wp:
+ if (host->gpio_cd_irq >= 0)
+ free_irq(host->gpio_cd_irq, host);
if (host->gpio_cd != -ENOSYS)
gpio_free(host->gpio_cd);
err_gpio_cd:
@@ -819,6 +861,8 @@
if (host->gpio_wp != -ENOSYS)
gpio_free(host->gpio_wp);
+ if (host->gpio_cd_irq >= 0)
+ free_irq(host->gpio_cd_irq, host);
if (host->gpio_cd != -ENOSYS)
gpio_free(host->gpio_cd);
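
In the mmci changes above, the fixed MCI_FIFOSIZE/MCI_FIFOHALFSIZE macros give way to per-variant fifosize/fifohalfsize fields, so the PIO path can size its bursts per controller (16 words on ARM/U300, 30 on Ux500). A standalone sketch of how the per-variant geometry drives the write chunk calculation; the structs and numbers mirror the diff, but nothing here is the real driver:

#include <stdio.h>

struct variant_data {
    const char *name;
    unsigned int fifosize;      /* bytes writable when the TX FIFO is empty */
    unsigned int fifohalfsize;  /* bytes writable at the half-empty mark */
};

static const struct variant_data variant_arm   = { "arm",   16 * 4, 8 * 4 };
static const struct variant_data variant_ux500 = { "ux500", 30 * 4, 8 * 4 };

/* How many bytes the PIO write path may push in one go, as in
 * mmci_pio_write(): a full FIFO's worth if the FIFO is empty,
 * otherwise only up to the half-empty threshold. */
static unsigned int pio_write_chunk(const struct variant_data *variant,
                                    unsigned int remain, int fifo_empty)
{
    unsigned int maxcnt = fifo_empty ? variant->fifosize
                                     : variant->fifohalfsize;
    return remain < maxcnt ? remain : maxcnt;
}

int main(void)
{
    const struct variant_data *variants[] = { &variant_arm, &variant_ux500 };

    for (int i = 0; i < 2; i++)
        printf("%s: empty=%u half=%u (200 bytes left, fifo empty -> %u)\n",
               variants[i]->name,
               variants[i]->fifosize,
               variants[i]->fifohalfsize,
               pio_write_chunk(variants[i], 200, 1));
    return 0;
}
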
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 68970cf..4ae887f 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -54,10 +54,16 @@
#define MCI_DPSM_MODE (1 << 2)
#define MCI_DPSM_DMAENABLE (1 << 3)
#define MCI_DPSM_BLOCKSIZE (1 << 4)
-#define MCI_DPSM_RWSTART (1 << 8)
-#define MCI_DPSM_RWSTOP (1 << 9)
-#define MCI_DPSM_RWMOD (1 << 10)
-#define MCI_DPSM_SDIOEN (1 << 11)
+/* Control register extensions in the ST Micro U300 and Ux500 versions */
+#define MCI_ST_DPSM_RWSTART (1 << 8)
+#define MCI_ST_DPSM_RWSTOP (1 << 9)
+#define MCI_ST_DPSM_RWMOD (1 << 10)
+#define MCI_ST_DPSM_SDIOEN (1 << 11)
+/* Control register extensions in the ST Micro Ux500 versions */
+#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
+#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
+#define MCI_ST_DPSM_BUSYMODE (1 << 14)
+#define MCI_ST_DPSM_DDRMODE (1 << 15)
#define MMCIDATACNT 0x030
#define MMCISTATUS 0x034
@@ -133,13 +139,6 @@
MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
-/*
- * The size of the FIFO in bytes.
- */
-#define MCI_FIFOSIZE (16*4)
-
-#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
-
#define NR_SG 16
struct clk;
@@ -154,6 +153,7 @@
struct clk *clk;
int gpio_cd;
int gpio_wp;
+ int gpio_cd_irq;
unsigned int data_xfered;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4a8776f..4526d27 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2305,7 +2305,6 @@
int ret = 0;
struct platform_device *pdev = to_platform_device(dev);
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
- pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
if (host && host->suspended)
return 0;
@@ -2324,8 +2323,8 @@
}
}
cancel_work_sync(&host->mmc_carddetect_work);
- mmc_host_enable(host->mmc);
ret = mmc_suspend_host(host->mmc);
+ mmc_host_enable(host->mmc);
if (ret == 0) {
omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2e16e0a..976330d 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1600,7 +1600,7 @@
host->pio_active = XFER_NONE;
#ifdef CONFIG_MMC_S3C_PIODMA
- host->dodma = host->pdata->dma;
+ host->dodma = host->pdata->use_dma;
#endif
host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 71ad416..aacb862 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -241,8 +241,10 @@
static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
{
struct sdhci_host *host = platform_get_drvdata(dev);
+ unsigned long flags;
+
if (host) {
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
if (state) {
dev_dbg(&dev->dev, "card inserted.\n");
host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
tasklet_schedule(&host->card_tasklet);
- spin_unlock(&host->lock);
+ spin_unlock_irqrestore(&host->lock, flags);
}
}
@@ -481,8 +483,10 @@
sdhci_remove_host(host, 1);
for (ptr = 0; ptr < 3; ptr++) {
- clk_disable(sc->clk_bus[ptr]);
- clk_put(sc->clk_bus[ptr]);
+ if (sc->clk_bus[ptr]) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+ }
}
clk_disable(sc->clk_io);
clk_put(sc->clk_io);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ee7d0a5..69d98e3 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -164,6 +164,7 @@
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
+ void *sg_virt;
unsigned short *buf;
unsigned int count;
unsigned long flags;
@@ -173,8 +174,8 @@
return;
}
- buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
- host->sg_off);
+ sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+ buf = (unsigned short *)(sg_virt + host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
@@ -191,7 +192,7 @@
host->sg_off += count;
- tmio_mmc_kunmap_atomic(host, &flags);
+ tmio_mmc_kunmap_atomic(sg_virt, &flags);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 64f7d5d..0fedc78 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -82,10 +82,7 @@
#define ack_mmc_irqs(host, i) \
do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_STATUS); \
- mask &= ~((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_STATUS, mask); \
+ sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
} while (0)
@@ -177,19 +174,17 @@
return --host->sg_len;
}
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
- struct scatterlist *sg = host->sg_ptr;
-
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
unsigned long *flags)
{
- kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a382e3d..6fbeefa 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -682,7 +682,6 @@
static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
{
struct bf5xx_nand_info *info = to_nand_info(pdev);
- struct mtd_info *mtd = NULL;
platform_set_drvdata(pdev, NULL);
@@ -690,11 +689,7 @@
* and their partitions, then go through freeing the
* resources used
*/
- mtd = &info->mtd;
- if (mtd) {
- nand_release(mtd);
- kfree(mtd);
- }
+ nand_release(&info->mtd);
peripheral_free_list(bfin_nfc_pin_req);
bf5xx_nand_dma_remove(info);
@@ -710,7 +705,7 @@
struct nand_chip *chip = mtd->priv;
int ret;
- ret = nand_scan_ident(mtd, 1);
+ ret = nand_scan_ident(mtd, 1, NULL);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index fcf8ceb..b2828e8 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -67,7 +67,9 @@
#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
#define NFC_V1_V2_CONFIG1_RST (1 << 6)
#define NFC_V1_V2_CONFIG1_CE (1 << 7)
-#define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT (1 << 11)
#define NFC_V1_V2_CONFIG2_INT (1 << 15)
@@ -402,16 +404,16 @@
/* Wait for operation to complete */
wait_op_done(host, true);
- if (this->options & NAND_BUSWIDTH_16) {
- void __iomem *main_buf = host->main_area0;
- /* compress the ID info */
- writeb(readb(main_buf + 2), main_buf + 1);
- writeb(readb(main_buf + 4), main_buf + 2);
- writeb(readb(main_buf + 6), main_buf + 3);
- writeb(readb(main_buf + 8), main_buf + 4);
- writeb(readb(main_buf + 10), main_buf + 5);
- }
memcpy(host->data_buf, host->main_area0, 16);
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ /* compress the ID info */
+ host->data_buf[1] = host->data_buf[2];
+ host->data_buf[2] = host->data_buf[4];
+ host->data_buf[3] = host->data_buf[6];
+ host->data_buf[4] = host->data_buf[8];
+ host->data_buf[5] = host->data_buf[10];
+ }
}
static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -729,27 +731,30 @@
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- uint16_t tmp;
+ uint16_t config1 = 0;
- /* enable interrupt, disable spare enable */
- tmp = readw(NFC_V1_V2_CONFIG1);
- tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
- tmp &= ~NFC_V1_V2_CONFIG1_SP_EN;
- if (nand_chip->ecc.mode == NAND_ECC_HW) {
- tmp |= NFC_V1_V2_CONFIG1_ECC_EN;
- } else {
- tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN;
- }
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (nfc_is_v21())
+ config1 |= NFC_V2_CONFIG1_FP_INT;
+
+ if (!cpu_is_mx21())
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
if (nfc_is_v21() && mtd->writesize) {
+ uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 4)
- tmp |= NFC_V2_CONFIG1_ECC_MODE_4;
+ config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
+ config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
} else {
host->eccsize = 1;
}
- writew(tmp, NFC_V1_V2_CONFIG1);
+ writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
/* Unlock the internal RAM Buffer */
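
The NFC_V2_CONFIG1_PPB() field written above stores ffs(pages_per_block) - 6 in two bits, where pages_per_block = mtd->erasesize / mtd->writesize. A worked illustration (not from the patch; assumes the usual power-of-two block geometries):

/*
 *   32 pages per block  ->  ffs(32)  - 6 = 0
 *   64 pages per block  ->  ffs(64)  - 6 = 1
 *  128 pages per block  ->  ffs(128) - 6 = 2
 *  256 pages per block  ->  ffs(256) - 6 = 3
 */
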
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4d89f37..4d01cda6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,7 @@
goto fail_free_irq;
}
+#ifdef CONFIG_MTD_PARTITIONS
if (mtd_has_cmdlinepart()) {
static const char *probes[] = { "cmdlinepart", NULL };
struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@
}
return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+#else
+ return 0;
+#endif
fail_free_irq:
free_irq(irq, info);
@@ -1364,7 +1368,9 @@
platform_set_drvdata(pdev, NULL);
del_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(mtd);
+#endif
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, info);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index cb443af..a460f1b 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -554,14 +554,13 @@
do {
status = readl(base + S5PC110_DMA_TRANS_STATUS);
+ if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+ writel(S5PC110_DMA_TRANS_CMD_TEC,
+ base + S5PC110_DMA_TRANS_CMD);
+ return -EIO;
+ }
} while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
- if (status & S5PC110_DMA_TRANS_STATUS_TE) {
- writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
- writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
- return -EIO;
- }
-
writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
return 0;
@@ -571,13 +570,12 @@
unsigned char *buffer, int offset, size_t count)
{
struct onenand_chip *this = mtd->priv;
- void __iomem *bufferram;
void __iomem *p;
void *buf = (void *) buffer;
dma_addr_t dma_src, dma_dst;
int err;
- p = bufferram = this->base + area;
+ p = this->base + area;
if (ONENAND_CURRENT_BUFFERRAM(this)) {
if (area == ONENAND_DATARAM)
p += this->writesize;
@@ -621,7 +619,7 @@
normal:
if (count != mtd->writesize) {
/* Copy the bufferram to memory to prevent unaligned access */
- memcpy(this->page_buf, bufferram, mtd->writesize);
+ memcpy(this->page_buf, p, mtd->writesize);
p = this->page_buf + offset;
}
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index a045559..179871d 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -635,6 +635,9 @@
must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
large_frames:1, /* accept large frames */
handling_irq:1; /* private in_irq indicator */
+ /* {get|set}_wol operations are already serialized by rtnl.
+ * no additional locking is required for enable_wol and acpi_set_WOL().
+ */
int drv_flags;
u16 status_enable;
u16 intr_enable;
@@ -1994,10 +1997,9 @@
}
}
- if (status & RxEarly) { /* Rx early is unused. */
- vortex_rx(dev);
+ if (status & RxEarly) /* Rx early is unused. */
iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
+
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (vortex_debug > 4)
@@ -2298,7 +2300,12 @@
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
if (status == 0xffff)
break;
+ if (status & RxEarly)
+ vortex_rx(dev);
+ spin_unlock(&vp->window_lock);
vortex_error(dev, status);
+ spin_lock(&vp->window_lock);
+ window_set(vp, 7);
}
if (--work_done < 0) {
@@ -2935,28 +2942,31 @@
{
struct vortex_private *vp = netdev_priv(dev);
- spin_lock_irq(&vp->lock);
+ if (!VORTEX_PCI(vp))
+ return;
+
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
if (vp->enable_wol)
wol->wolopts |= WAKE_MAGIC;
- spin_unlock_irq(&vp->lock);
}
static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct vortex_private *vp = netdev_priv(dev);
+
+ if (!VORTEX_PCI(vp))
+ return -EOPNOTSUPP;
+
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- spin_lock_irq(&vp->lock);
if (wol->wolopts & WAKE_MAGIC)
vp->enable_wol = 1;
else
vp->enable_wol = 0;
acpi_set_WOL(dev);
- spin_unlock_irq(&vp->lock);
return 0;
}
@@ -3198,6 +3208,9 @@
return;
}
+ if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+ return;
+
/* Change the power state to D3; RxEnable doesn't take effect. */
pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
}
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 63b9ba0..c73be28 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1251,6 +1251,12 @@
rrd_ring->desc = NULL;
rrd_ring->dma = 0;
+
+ adapter->cmb.dma = 0;
+ adapter->cmb.cmb = NULL;
+
+ adapter->smb.dma = 0;
+ adapter->smb.smb = NULL;
}
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2847,10 +2853,11 @@
pci_enable_wake(pdev, PCI_D3cold, 0);
atl1_reset_hw(&adapter->hw);
- adapter->cmb.cmb->int_stats = 0;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ adapter->cmb.cmb->int_stats = 0;
atl1_up(adapter);
+ }
netif_device_attach(netdev);
return 0;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 37617ab..1e620e2 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -848,6 +848,15 @@
b44_tx(bp);
/* spin_unlock(&bp->tx_lock); */
}
+ if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
+ bp->istat &= ~ISTAT_RFO;
+ b44_disable_ints(bp);
+ ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+ netif_wake_queue(bp->dev);
+ }
+
spin_unlock_irqrestore(&bp->lock, flags);
work_done = 0;
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 99197bd..53306bf 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -181,6 +181,7 @@
u64 be_rx_bytes_prev;
u64 be_rx_pkts;
u32 be_rx_rate;
+ u32 be_rx_mcast_pkt;
/* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr */
u32 be_802_3_dropped_frames;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3d30549..34abcc9 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -140,10 +140,8 @@
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
- BUG_ON(!is_link_state_evt(compl->flags));
-
- /* Interpret compl as a async link evt */
- be_async_link_state_process(adapter,
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter,
(struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
*status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
- be_dump_ue(adapter);
+ be_detect_dump_ue(adapter);
return -1;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index bdc10a2..ad1e6fa 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -992,5 +992,5 @@
extern int be_cmd_get_phy_info(struct be_adapter *adapter,
struct be_dma_mem *cmd);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_dump_ue(struct be_adapter *adapter);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index cd16243..13f0abb 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -60,6 +60,7 @@
{DRVSTAT_INFO(be_rx_events)},
{DRVSTAT_INFO(be_tx_compl)},
{DRVSTAT_INFO(be_rx_compl)},
+ {DRVSTAT_INFO(be_rx_mcast_pkt)},
{DRVSTAT_INFO(be_ethrx_post_fail)},
{DRVSTAT_INFO(be_802_3_dropped_frames)},
{DRVSTAT_INFO(be_802_3_malformed_frames)},
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 5d38046..a2ec5df 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -167,8 +167,11 @@
#define FLASH_FCoE_BIOS_START_g3 (13631488)
#define FLASH_REDBOOT_START_g3 (262144)
-
-
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET 0
+#define BE_MULTICAST_PACKET 1
+#define BE_BROADCAST_PACKET 2
+#define BE_RSVD_PACKET 3
/*
* BE descriptors: host memory data structures whose formats
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 74e146f..6eda7a0 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -247,6 +247,7 @@
dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+ dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@
/* no space available in linux */
dev_stats->tx_dropped = 0;
- dev_stats->multicast = port_stats->rx_multicast_frames;
dev_stats->collisions = 0;
/* detailed tx_errors */
@@ -848,7 +848,7 @@
}
static void be_rx_stats_update(struct be_adapter *adapter,
- u32 pktsize, u16 numfrags)
+ u32 pktsize, u16 numfrags, u8 pkt_type)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -856,6 +856,9 @@
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
stats->be_rx_pkts++;
+
+ if (pkt_type == BE_MULTICAST_PACKET)
+ stats->be_rx_mcast_pkt++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -925,9 +928,11 @@
u16 rxq_idx, i, j;
u32 pktsize, hdr_len, curr_frag_len, size;
u8 *start;
+ u8 pkt_type;
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
page_info = get_rx_page_info(adapter, rxq_idx);
@@ -993,7 +998,7 @@
BUG_ON(j > MAX_SKB_FRAGS);
done:
- be_rx_stats_update(adapter, pktsize, num_rcvd);
+ be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1060,6 +1065,7 @@
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
u16 i, rxq_idx = 0, vid, j;
u8 vtm;
+ u8 pkt_type;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
/* Is it a flush compl that has no data */
@@ -1070,6 +1076,7 @@
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
@@ -1125,7 +1132,7 @@
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
}
- be_rx_stats_update(adapter, pkt_size, num_rcvd);
+ be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1743,26 +1750,7 @@
return 1;
}
-static inline bool be_detect_ue(struct be_adapter *adapter)
-{
- u32 online0 = 0, online1 = 0;
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
-
- if (!online0 || !online1) {
- adapter->ue_detected = true;
- dev_err(&adapter->pdev->dev,
- "UE Detected!! online0=%d online1=%d\n",
- online0, online1);
- return true;
- }
-
- return false;
-}
-
-void be_dump_ue(struct be_adapter *adapter)
+void be_detect_dump_ue(struct be_adapter *adapter)
{
u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
u32 i;
@@ -1779,6 +1767,11 @@
ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+ if (ue_status_lo || ue_status_hi) {
+ adapter->ue_detected = true;
+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ }
+
if (ue_status_lo) {
for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
if (ue_status_lo & 1)
@@ -1814,10 +1807,8 @@
adapter->rx_post_starved = false;
be_post_rx_frags(adapter);
}
- if (!adapter->ue_detected) {
- if (be_detect_ue(adapter))
- be_dump_ue(adapter);
- }
+ if (!adapter->ue_detected)
+ be_detect_dump_ue(adapter);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 822f586..0ddf4c6 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2466,6 +2466,9 @@
if (!(dev->flags & IFF_MASTER))
goto out;
+ if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+ goto out;
+
read_lock(&bond->lock);
slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
orig_dev);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c746b33..26bb118 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -362,6 +362,9 @@
goto out;
}
+ if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
+ goto out;
+
if (skb->len < sizeof(struct arp_pkt)) {
pr_debug("Packet is too small to be an ARP\n");
goto out;
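
Both bonding hunks above add the same defensive step: pskb_may_pull() guarantees the header that is about to be parsed is actually present in the skb's linear data before it is dereferenced, so runt frames are dropped instead of read past. A self-contained sketch of the pattern (example_arp_rcv() is hypothetical; pskb_may_pull() and arp_hdr_len() are the standard helpers used above):

#include <linux/skbuff.h>
#include <linux/if_arp.h>

static int example_arp_rcv(struct sk_buff *skb)
{
        const struct arphdr *arp;

        /* make sure the fixed ARP header is in the linear area before touching it */
        if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return -EINVAL;

        arp = (const struct arphdr *)skb->data;
        /* ... parse arp->ar_op etc. ... */
        return 0;
}
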
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cc4cfc..3b16f62 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2797,9 +2797,15 @@
* so it can wait
*/
bond_for_each_slave(bond, slave, i) {
+ unsigned long trans_start = dev_trans_start(slave->dev);
+
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
- time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks) &&
+ time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + delta_in_ticks)) {
slave->link = BOND_LINK_UP;
slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
- (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
+ if (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + 2 * delta_in_ticks)) {
slave->link = BOND_LINK_DOWN;
slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@
{
struct slave *slave;
int i, commit = 0;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
slave->new_link = BOND_LINK_NOCHANGE;
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
- delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + delta_in_ticks)) {
+
slave->new_link = BOND_LINK_UP;
commit++;
}
@@ -2902,8 +2915,9 @@
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (!time_after_eq(jiffies, slave->jiffies +
- 2 * delta_in_ticks))
+ if (time_in_range(jiffies,
+ slave->jiffies - delta_in_ticks,
+ slave->jiffies + 2 * delta_in_ticks))
continue;
/*
@@ -2921,8 +2935,10 @@
*/
if (slave->state == BOND_STATE_BACKUP &&
!bond->current_arp_slave &&
- time_after(jiffies, slave_last_rx(bond, slave) +
- 3 * delta_in_ticks)) {
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2933,11 +2949,15 @@
* - (more than 2*delta since receive AND
* the bond has an IP address)
*/
+ trans_start = dev_trans_start(slave->dev);
if ((slave->state == BOND_STATE_ACTIVE) &&
- (time_after_eq(jiffies, dev_trans_start(slave->dev) +
- 2 * delta_in_ticks) ||
- (time_after_eq(jiffies, slave_last_rx(bond, slave)
- + 2 * delta_in_ticks)))) {
+ (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2956,6 +2976,7 @@
{
struct slave *slave;
int i;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
switch (slave->new_link) {
@@ -2963,10 +2984,11 @@
continue;
case BOND_LINK_UP:
+ trans_start = dev_trans_start(slave->dev);
if ((!bond->curr_active_slave &&
- time_before_eq(jiffies,
- dev_trans_start(slave->dev) +
- delta_in_ticks)) ||
+ time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks)) ||
bond->curr_active_slave != slave) {
slave->link = BOND_LINK_UP;
bond->current_arp_slave = NULL;
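
The ARP-monitor hunks above replace pairs of time_before_eq()/time_after_eq() checks with time_in_range(), which tests whether jiffies falls inside an inclusive window and stays correct across jiffies wrap-around. For reference, the helper is defined in include/linux/jiffies.h roughly as:

#define time_in_range(a, b, c)          \
        (time_after_eq(a, b) &&         \
         time_before_eq(a, c))

/* time_after_eq()/time_before_eq() compare via signed subtraction, so the
 * window test above remains valid even if jiffies wraps between b and c. */
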
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ad19585..f208712 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2296,6 +2296,8 @@
case CHELSIO_GET_QSET_NUM:{
struct ch_reg edata;
+ memset(&edata, 0, sizeof(struct ch_reg));
+
edata.cmd = CHELSIO_GET_QSET_NUM;
edata.val = pi->nqsets;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
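
The memset() added above (and the matching ones in the eql and hso hunks later in this patch) is the usual fix for an ioctl information leak: zero the whole on-stack structure before filling selected fields, so struct padding and untouched members never reach user space through copy_to_user(). A self-contained sketch of the pattern, with a hypothetical struct and handler:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct example_reg {
        u32 cmd;
        u32 val;
};

static int example_get_reg(void __user *useraddr, u32 val)
{
        struct example_reg edata;

        memset(&edata, 0, sizeof(edata));       /* no stale stack bytes leak out */

        edata.cmd = 1;
        edata.val = val;

        if (copy_to_user(useraddr, &edata, sizeof(edata)))
                return -EFAULT;
        return 0;
}
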
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 66ed08f..ba302a5 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -57,6 +57,7 @@
E1000_SCTL = 0x00024, /* SerDes Control - RW */
E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
+ E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
E1000_FCT = 0x00030, /* Flow Control Type - RW */
E1000_VET = 0x00038, /* VLAN Ether Type - RW */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 63930d1..57b5435 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -105,6 +105,10 @@
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
@@ -125,6 +129,7 @@
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
#define HV_SMB_ADDR_PEC_EN 0x0200
#define HV_SMB_ADDR_VALID 0x0080
@@ -237,6 +242,8 @@
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -272,7 +279,7 @@
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- u32 ctrl;
+ u32 ctrl, fwsm;
s32 ret_val = 0;
phy->addr = 1;
@@ -294,7 +301,8 @@
* disabled, then toggle the LANPHYPC Value bit to force
* the interconnect to PCIe mode.
*/
- if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ fwsm = er32(FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@
ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
ew32(CTRL, ctrl);
msleep(50);
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
}
/*
@@ -315,6 +330,13 @@
if (ret_val)
goto out;
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ msleep(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
phy->id = e1000_phy_unknown;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
@@ -561,13 +583,10 @@
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
- /* Disable PHY configuration by hardware, config by software */
- if (mac->type == e1000_pch2lan) {
- u32 extcnf_ctrl = er32(EXTCNF_CTRL);
-
- extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- }
+ /* Gate automatic PHY configuration by hardware on managed 82579 */
+ if ((mac->type == e1000_pch2lan) &&
+ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
return 0;
}
@@ -652,6 +671,12 @@
goto out;
}
+ if (hw->mac.type == e1000_pch2lan) {
+ ret_val = e1000_k1_workaround_lv(hw);
+ if (ret_val)
+ goto out;
+ }
+
/*
* Check if there was DownShift, must be checked
* immediately after link-up
@@ -895,6 +920,34 @@
}
/**
+ * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ * @hw: pointer to the HW structure
+ *
+ * Assumes semaphore already acquired.
+ *
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ u32 strap = er32(STRAP);
+ s32 ret_val = 0;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~HV_SMB_ADDR_MASK;
+ phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+ phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
* @hw: pointer to the HW structure
*
@@ -903,7 +956,6 @@
**/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
- struct e1000_adapter *adapter = hw->adapter;
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val = 0;
@@ -921,7 +973,8 @@
if (phy->type != e1000_phy_igp_3)
return ret_val;
- if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
break;
}
@@ -961,21 +1014,16 @@
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- ((hw->mac.type == e1000_pchlan) ||
- (hw->mac.type == e1000_pch2lan))) {
+ if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) ||
+ (hw->mac.type == e1000_pch2lan)) {
/*
* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
* When both NVM bits are cleared, SW will configure
* them instead.
*/
- data = er32(STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
+ ret_val = e1000_write_smbus_addr(hw);
if (ret_val)
goto out;
@@ -1440,10 +1488,6 @@
goto out;
/* Enable jumbo frame workaround in the PHY */
- e1e_rphy(hw, PHY_REG(769, 20), &data);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
- if (ret_val)
- goto out;
e1e_rphy(hw, PHY_REG(769, 23), &data);
data &= ~(0x7F << 5);
data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@
goto out;
e1e_rphy(hw, PHY_REG(769, 16), &data);
data &= ~(1 << 13);
- data |= (1 << 12);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
goto out;
@@ -1477,7 +1520,7 @@
mac_reg = er32(RCTL);
mac_reg &= ~E1000_RCTL_SECRC;
- ew32(FFLT_DBG, mac_reg);
+ ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@
goto out;
/* Write PHY register values back to h/w defaults */
- e1e_rphy(hw, PHY_REG(769, 20), &data);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
- if (ret_val)
- goto out;
e1e_rphy(hw, PHY_REG(769, 23), &data);
data &= ~(0x7F << 5);
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
if (ret_val)
goto out;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data &= ~(1 << 12);
data |= (1 << 13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
@@ -1559,6 +1597,69 @@
}
/**
+ * e1000_k1_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 status_reg = 0;
+ u32 mac_reg;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ goto out;
+
+ if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+ == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+ if (status_reg & HV_M_STATUS_SPEED_1000)
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+ else
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+
+ ew32(FEXTNVM4, mac_reg);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to true to gate, false to ungate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+ u32 extcnf_ctrl;
+
+ if (hw->mac.type != e1000_pch2lan)
+ return;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (gate)
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ else
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ return;
+}
+
+/**
* e1000_lan_init_done_ich8lan - Check for PHY config completion
* @hw: pointer to the HW structure
*
@@ -1602,6 +1703,9 @@
if (e1000_check_reset_block(hw))
goto out;
+ /* Allow time for h/w to get to quiescent state after reset */
+ msleep(10);
+
/* Perform any necessary post-reset workarounds */
switch (hw->mac.type) {
case e1000_pchlan:
@@ -1630,6 +1734,13 @@
/* Configure the LCD with the OEM bits in NVM */
ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ msleep(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
out:
return ret_val;
}
@@ -1646,6 +1757,11 @@
{
s32 ret_val = 0;
+ /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
ret_val = e1000e_phy_hw_reset_generic(hw);
if (ret_val)
goto out;
@@ -2910,6 +3026,14 @@
* external PHY is reset.
*/
ctrl |= E1000_CTRL_PHY_RST;
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
}
ret_val = e1000_acquire_swflag_ich8lan(hw);
e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@
void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
+ s32 ret_val;
phy_ctrl = er32(PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
ew32(PHY_CTRL, phy_ctrl);
- if (hw->mac.type >= e1000_pchlan)
- e1000_phy_hw_reset_ich8lan(hw);
+ if (hw->mac.type >= e1000_pchlan) {
+ e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ e1000_write_smbus_addr(hw);
+ hw->phy.ops.release(hw);
+ }
}
/**
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2b8ef44..e561d15 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2704,6 +2704,16 @@
u32 psrctl = 0;
u32 pages = 0;
+ /* Workaround Si errata on 82579 - configure jumbo frame flow */
+ if (hw->mac.type == e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+ }
+
/* Program MC offset vector base */
rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2744,16 +2754,6 @@
e1e_wphy(hw, 22, phy_data);
}
- /* Workaround Si errata on 82579 - configure jumbo frame flow */
- if (hw->mac.type == e1000_pch2lan) {
- s32 ret_val;
-
- if (rctl & E1000_RCTL_LPE)
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
- else
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
- }
-
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
@@ -4833,6 +4833,15 @@
return -EINVAL;
}
+ /* Jumbo frame workaround on 82579 requires CRC be stripped */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+ (new_mtu > ETH_DATA_LEN)) {
+ e_err("Jumbo Frames not supported on 82579 when CRC "
+ "stripping is disabled.\n");
+ return -EINVAL;
+ }
+
/* 82573 Errata 17 */
if (((adapter->hw.mac.type == e1000_82573) ||
(adapter->hw.mac.type == e1000_82574)) &&
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index dda2c79..0cb1cf9 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -555,6 +555,8 @@
equalizer_t *eql;
master_config_t mc;
+ memset(&mc, 0, sizeof(master_config_t));
+
if (eql_is_master(dev)) {
eql = netdev_priv(dev);
mc.max_slaves = eql->max_slaves;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3506fd6..519e19e 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2928,7 +2928,7 @@
if (dev->emac_irq != NO_IRQ)
irq_dispose_mapping(dev->emac_irq);
err_free:
- kfree(ndev);
+ free_netdev(ndev);
err_gone:
/* if we were on the bootlist, remove us as we won't show up and
* wake up all waiters to notify them in case they were waiting
@@ -2971,7 +2971,7 @@
if (dev->emac_irq != NO_IRQ)
irq_dispose_mapping(dev->emac_irq);
- kfree(dev->ndev);
+ free_netdev(dev->ndev);
return 0;
}
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b4fb07a..51919fc 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -503,30 +503,33 @@
ks8851_wrreg16(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
- if (rxlen > 0) {
- skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
- if (!skb) {
- /* todo - dump frame and move on */
+ if (rxlen > 4) {
+ unsigned int rxalign;
+
+ rxlen -= 4;
+ rxalign = ALIGN(rxlen, 4);
+ skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+ if (skb) {
+
+ /* 4 bytes of status header + 4 bytes of
+ * garbage: we put them before ethernet
+ * header, so that they are copied,
+ * but ignored.
+ */
+
+ rxpkt = skb_put(skb, rxlen) - 8;
+
+ ks8851_rdfifo(ks, rxpkt, rxalign + 8);
+
+ if (netif_msg_pktdata(ks))
+ ks8851_dbg_dumpkkt(ks, rxpkt);
+
+ skb->protocol = eth_type_trans(skb, ks->netdev);
+ netif_rx(skb);
+
+ ks->netdev->stats.rx_packets++;
+ ks->netdev->stats.rx_bytes += rxlen;
}
-
- /* two bytes to ensure ip is aligned, and four bytes
- * for the status header and 4 bytes of garbage */
- skb_reserve(skb, 2 + 4 + 4);
-
- rxpkt = skb_put(skb, rxlen - 4) - 8;
-
- /* align the packet length to 4 bytes, and add 4 bytes
- * as we're getting the rx status header as well */
- ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
-
- if (netif_msg_pktdata(ks))
- ks8851_dbg_dumpkkt(ks, rxpkt);
-
- skb->protocol = eth_type_trans(skb, ks->netdev);
- netif_rx(skb);
-
- ks->netdev->stats.rx_packets++;
- ks->netdev->stats.rx_bytes += rxlen - 4;
}
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
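
The new receive path above trims 4 bytes from rxlen (as the old code already did when accounting rx_bytes) and rounds the remaining length up to the FIFO's 4-byte granularity with ALIGN() before draining it. For illustration, with a power-of-two alignment the kernel's ALIGN() behaves roughly like:

#define EXAMPLE_ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

/* e.g. EXAMPLE_ALIGN(61, 4) == 64, EXAMPLE_ALIGN(64, 4) == 64 */
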
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index bdf2149..87f0a93 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -38,6 +38,7 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
index 5ae28c9..8cf9d4f 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ll_temac_mdio.c
@@ -10,6 +10,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index cabae7b..b075a35 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1540,7 +1540,6 @@
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, netdev);
napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@
skb_put(skb, lro_length + data_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index bc695d5..fe6983a 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7269,32 +7269,28 @@
struct niu_parent *parent = np->parent;
struct niu_tcam_entry *tp;
int i, idx, cnt;
- u16 n_entries;
unsigned long flags;
-
+ int ret = 0;
/* put the tcam size here */
nfc->data = tcam_get_size(np);
niu_lock_parent(np, flags);
- n_entries = nfc->rule_cnt;
for (cnt = 0, i = 0; i < nfc->data; i++) {
idx = tcam_get_index(np, i);
tp = &parent->tcam[idx];
if (!tp->valid)
continue;
+ if (cnt == nfc->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
rule_locs[cnt] = i;
cnt++;
}
niu_unlock_parent(np, flags);
- if (n_entries != cnt) {
- /* print warning, this should not happen */
- netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
- np->parent->index, __func__, n_entries, cnt);
- }
-
- return 0;
+ return ret;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 49279b0..f9b509a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -508,7 +508,8 @@
unsigned int vcc,
void *priv_data)
{
- int *has_shmem = priv_data;
+ int *priv = priv_data;
+ int try = (*priv & 0x1);
int i;
cistpl_io_t *io = &cfg->io;
@@ -525,77 +526,103 @@
i = p_dev->resource[1]->end = 0;
}
- *has_shmem = ((cfg->mem.nwin == 1) &&
- (cfg->mem.win[0].len >= 0x4000));
+ *priv &= ((cfg->mem.nwin == 1) &&
+ (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
+
p_dev->resource[0]->start = io->win[i].base;
p_dev->resource[0]->end = io->win[i].len;
- p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ if (!try)
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ else
+ p_dev->io_lines = 16;
if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
return try_io_port(p_dev);
- return 0;
+ return -EINVAL;
+}
+
+static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
+ int *has_shmem, int try)
+{
+ struct net_device *dev = link->priv;
+ hw_info_t *local_hw_info;
+ pcnet_dev_t *info = PRIV(dev);
+ int priv = try;
+ int ret;
+
+ ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
+ if (ret) {
+ dev_warn(&link->dev, "no useable port range found\n");
+ return NULL;
+ }
+ *has_shmem = (priv & 0x10);
+
+ if (!link->irq)
+ return NULL;
+
+ if (resource_size(link->resource[1]) == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+ if ((link->manf_id == MANFID_IBM) &&
+ (link->card_id == PRODID_IBM_HOME_AND_AWAY))
+ link->conf.ConfigIndex |= 0x10;
+
+ ret = pcmcia_request_configuration(link, &link->conf);
+ if (ret)
+ return NULL;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ if (info->flags & HAS_MISC_REG) {
+ if ((if_port == 1) || (if_port == 2))
+ dev->if_port = if_port;
+ else
+ dev_notice(&link->dev, "invalid if_port requested\n");
+ } else
+ dev->if_port = 0;
+
+ if ((link->conf.ConfigBase == 0x03c0) &&
+ (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
+ dev_info(&link->dev,
+ "this is an AX88190 card - use axnet_cs instead.\n");
+ return NULL;
+ }
+
+ local_hw_info = get_hwinfo(link);
+ if (!local_hw_info)
+ local_hw_info = get_prom(link);
+ if (!local_hw_info)
+ local_hw_info = get_dl10019(link);
+ if (!local_hw_info)
+ local_hw_info = get_ax88190(link);
+ if (!local_hw_info)
+ local_hw_info = get_hwired(link);
+
+ return local_hw_info;
}
static int pcnet_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
pcnet_dev_t *info = PRIV(dev);
- int ret, start_pg, stop_pg, cm_offset;
+ int start_pg, stop_pg, cm_offset;
int has_shmem = 0;
hw_info_t *local_hw_info;
dev_dbg(&link->dev, "pcnet_config\n");
- ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem);
- if (ret)
- goto failed;
-
- if (!link->irq)
- goto failed;
-
- if (resource_size(link->resource[1]) == 8) {
- link->conf.Attributes |= CONF_ENABLE_SPKR;
- link->conf.Status = CCSR_AUDIO_ENA;
- }
- if ((link->manf_id == MANFID_IBM) &&
- (link->card_id == PRODID_IBM_HOME_AND_AWAY))
- link->conf.ConfigIndex |= 0x10;
-
- ret = pcmcia_request_configuration(link, &link->conf);
- if (ret)
- goto failed;
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- if (info->flags & HAS_MISC_REG) {
- if ((if_port == 1) || (if_port == 2))
- dev->if_port = if_port;
- else
- printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
- } else {
- dev->if_port = 0;
- }
-
- if ((link->conf.ConfigBase == 0x03c0) &&
- (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
- printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
- printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
- goto failed;
- }
-
- local_hw_info = get_hwinfo(link);
- if (local_hw_info == NULL)
- local_hw_info = get_prom(link);
- if (local_hw_info == NULL)
- local_hw_info = get_dl10019(link);
- if (local_hw_info == NULL)
- local_hw_info = get_ax88190(link);
- if (local_hw_info == NULL)
- local_hw_info = get_hwired(link);
-
- if (local_hw_info == NULL) {
- printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
- " address for io base %#3lx\n", dev->base_addr);
- goto failed;
+ local_hw_info = pcnet_try_config(link, &has_shmem, 0);
+ if (!local_hw_info) {
+ /* check whether forcing io_lines to 16 helps... */
+ pcmcia_disable_device(link);
+ local_hw_info = pcnet_try_config(link, &has_shmem, 1);
+ if (local_hw_info == NULL) {
+ dev_notice(&link->dev, "unable to read hardware net"
+ " address for io base %#3lx\n", dev->base_addr);
+ goto failed;
+ }
}
info->flags = local_hw_info->flags;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6a6b819..6c58da2 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -308,7 +308,7 @@
* may call phy routines that try to grab the same lock, and that may
* lead to a deadlock.
*/
- if (phydev->attached_dev)
+ if (phydev->attached_dev && phydev->adjust_link)
phy_stop_machine(phydev);
if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@
return ret;
no_resume:
- if (phydev->attached_dev)
+ if (phydev->attached_dev && phydev->adjust_link)
phy_start_machine(phydev, NULL);
return 0;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6695a51..736b917 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1314,8 +1314,13 @@
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
i = 0;
list_for_each_entry(pch, &ppp->channels, clist) {
- navail += pch->avail = (pch->chan != NULL);
- pch->speed = pch->chan->speed;
+ if (pch->chan) {
+ pch->avail = 1;
+ navail++;
+ pch->speed = pch->chan->speed;
+ } else {
+ pch->avail = 0;
+ }
if (pch->avail) {
if (skb_queue_empty(&pch->file.xq) ||
!pch->had_frag) {
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 75ba744..2c7cf0b6 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1316,7 +1316,7 @@
return -ENOMEM;
}
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1404,7 +1404,6 @@
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, netdev);
napi_gro_receive(&sds_ring->napi, skb);
@@ -1466,8 +1465,6 @@
skb_put(skb, lro_length + data_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1700,8 +1697,6 @@
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
-
if (!qlcnic_check_loopback_buff(skb->data))
adapter->diag_cnt++;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 078bbf4..a0da4a1 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2934,7 +2934,7 @@
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow |
+ .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4625,8 +4625,7 @@
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+ if (unlikely(status & RxFIFOOver)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 07eb884..44150f2 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -384,7 +384,7 @@
free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
__ilog2(sizeof(void *)) + 4 : 0);
unregister_netdev(ndev);
- kfree(ndev);
+ free_netdev(ndev);
list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
list_del(&peer->node);
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index cc4bd8c..9265315 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -804,7 +804,7 @@
err_out_free_page:
free_page((unsigned long) sp->srings);
err_out_free_dev:
- kfree(dev);
+ free_netdev(dev);
err_out:
return err;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 0909ae9..8150ba1 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -58,6 +58,7 @@
MODULE_LICENSE("GPL");
MODULE_VERSION(SMSC_DRV_VERSION);
+MODULE_ALIAS("platform:smsc911x");
#if USE_DEBUG > 0
static int debug = 16;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index bbb7951..ea0461e 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1865,15 +1865,15 @@
if (!netif_running(dev))
return 0;
- spin_lock(&priv->lock);
-
if (priv->shutdown) {
/* Re-open the interface and re-init the MAC/DMA
- and the rings. */
+ and the rings (i.e. during the hibernation stage) */
stmmac_open(dev);
- goto out_resume;
+ return 0;
}
+ spin_lock(&priv->lock);
+
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -1901,7 +1901,6 @@
netif_start_queue(dev);
-out_resume:
spin_unlock(&priv->lock);
return 0;
}
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5efa577..6888e3d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -243,6 +243,7 @@
NWayState = (1 << 14) | (1 << 13) | (1 << 12),
NWayRestart = (1 << 12),
NonselPortActive = (1 << 9),
+ SelPortActive = (1 << 8),
LinkFailStatus = (1 << 2),
NetCxnErr = (1 << 1),
};
@@ -363,7 +364,9 @@
/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
-static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
+static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
@@ -1064,6 +1067,9 @@
unsigned int carrier;
unsigned long flags;
+ /* clear port active bits */
+ dw32(SIAStatus, NonselPortActive | SelPortActive);
+
carrier = (status & NetCxnErr) ? 0 : 1;
if (carrier) {
@@ -1158,14 +1164,29 @@
static void de_media_interrupt (struct de_private *de, u32 status)
{
if (status & LinkPass) {
+ /* Ignore if current media is AUI or BNC and we can't use TP */
+ if ((de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC) &&
+ (de->media_lock ||
+ !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
+ return;
+ /* If current media is not TP, change it to TP */
+ if ((de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC)) {
+ de->media_type = DE_MEDIA_TP_AUTO;
+ de_stop_rxtx(de);
+ de_set_media(de);
+ de_start_rxtx(de);
+ }
de_link_up(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
return;
}
BUG_ON(!(status & LinkFail));
-
- if (netif_carrier_ok(de->dev)) {
+ /* Mark the link as down only if current media is TP */
+ if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
+ de->media_type != DE_MEDIA_BNC) {
de_link_down(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
}
@@ -1229,6 +1250,7 @@
if (de->de21040)
return;
+ dw32(CSR13, 0); /* Reset phy */
pci_read_config_dword(de->pdev, PCIPM, &pmctl);
pmctl |= PM_Sleep;
pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1596,15 @@
return 0; /* nothing to change */
de_link_down(de);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
de_stop_rxtx(de);
de->media_type = new_media;
de->media_lock = media_lock;
de->media_advertise = ecmd->advertising;
de_set_media(de);
+ if (netif_running(de->dev))
+ de_start_rxtx(de);
return 0;
}
@@ -1911,8 +1936,14 @@
for (i = 0; i < DE_MAX_MEDIA; i++) {
if (de->media[i].csr13 == 0xffff)
de->media[i].csr13 = t21041_csr13[i];
- if (de->media[i].csr14 == 0xffff)
- de->media[i].csr14 = t21041_csr14[i];
+ if (de->media[i].csr14 == 0xffff) {
+ /* autonegotiation is broken at least on some chip
+ revisions - rev. 0x21 works, 0x11 does not */
+ if (de->pdev->revision < 0x20)
+ de->media[i].csr14 = t21041_csr14_brk[i];
+ else
+ de->media[i].csr14 = t21041_csr14[i];
+ }
if (de->media[i].csr15 == 0xffff)
de->media[i].csr15 = t21041_csr15[i];
}
@@ -2158,6 +2189,8 @@
dev_err(&dev->dev, "pci_enable_device failed in resume\n");
goto out;
}
+ pci_set_master(pdev);
+ de_init_rings(de);
de_init_hw(de);
out_attach:
netif_device_attach(dev);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6efca66..1cd752f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1652,6 +1652,8 @@
struct uart_icount cnow;
struct hso_tiocmget *tiocmget = serial->tiocmget;
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
if (!tiocmget)
return -ENOENT;
spin_lock_irq(&serial->serial_lock);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 8ed30fa..b2bcf99 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -429,10 +429,6 @@
.ndo_get_stats = &ipheth_stats,
};
-static struct device_type ipheth_type = {
- .name = "wwan",
-};
-
static int ipheth_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -450,7 +446,7 @@
netdev->netdev_ops = &ipheth_netdev_ops;
netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
- strcpy(netdev->name, "wwan%d");
+ strcpy(netdev->name, "eth%d");
dev = netdev_priv(netdev);
dev->udev = udev;
@@ -500,7 +496,6 @@
SET_NETDEV_DEV(netdev, &intf->dev);
SET_ETHTOOL_OPS(netdev, &ops);
- SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
retval = register_netdev(netdev);
if (retval) {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index fd69095..f534123 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2824,7 +2824,7 @@
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
ret = register_netdev(dev);
if (ret < 0)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 07dbc27..e23c406 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2613,6 +2613,11 @@
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EINVAL;
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_INFO(priv, "scan in progress.\n");
+ return -EINVAL;
+ }
+
if (mode >= IWL_MAX_FORCE_RESET) {
IWL_DEBUG_INFO(priv, "invalid reset request.\n");
return -EINVAL;
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a9352b2..b7e755f 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,16 +141,6 @@
.notifier_call = module_load_notify,
};
-
-static void end_sync(void)
-{
- end_cpu_work();
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
-}
-
-
int sync_start(void)
{
int err;
@@ -158,7 +148,7 @@
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- start_cpu_work();
+ mutex_lock(&buffer_mutex);
err = task_handoff_register(&task_free_nb);
if (err)
@@ -173,7 +163,10 @@
if (err)
goto out4;
+ start_cpu_work();
+
out:
+ mutex_unlock(&buffer_mutex);
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@
out2:
task_handoff_unregister(&task_free_nb);
out1:
- end_sync();
free_cpumask_var(marked_cpus);
goto out;
}
@@ -190,11 +182,20 @@
void sync_stop(void)
{
+ /* flush buffers */
+ mutex_lock(&buffer_mutex);
+ end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
- end_sync();
+ mutex_unlock(&buffer_mutex);
+ flush_scheduled_work();
+
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
+
free_cpumask_var(marked_cpus);
}
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 219f79e..f179ac2 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -120,8 +120,6 @@
cancel_delayed_work(&b->work);
}
-
- flush_scheduled_work();
}
/*
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3ceebb..4789f8e 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
+/* page table handling */
+#define LEVEL_STRIDE (9)
+#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+ return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+ return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+ return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+ return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+ return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+ return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+ return (pfn + level_size(level) - 1) & level_mask(level);
+}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
@@ -434,8 +477,6 @@
}
-static inline int width_to_agaw(int width);
-
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
@@ -646,51 +687,6 @@
spin_unlock_irqrestore(&iommu->lock, flags);
}
-/* page table handling */
-#define LEVEL_STRIDE (9)
-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
- return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
- return 30 + agaw * LEVEL_STRIDE;
-
-}
-
-static inline int width_to_agaw(int width)
-{
- return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
- return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
- return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
- return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn)
{
@@ -3761,6 +3757,33 @@
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+#define GGC 0x52
+#define GGC_MEMORY_SIZE_MASK (0xf << 8)
+#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
+#define GGC_MEMORY_SIZE_1M (0x1 << 8)
+#define GGC_MEMORY_SIZE_2M (0x3 << 8)
+#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
+#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
+#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
+#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
+
+static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+{
+ unsigned short ggc;
+
+ if (pci_read_config_word(dev, GGC, &ggc))
+ return;
+
+ if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+ printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+ dmar_map_gfx = 0;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
/* On Tylersburg chipsets, some BIOSes have been known to enable the
ISOCH DMAR unit for the Azalia sound device, but not give it any
TLB entries, which causes it to deadlock. Check for that. We do
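
The page-table helpers moved to the top of the file in the first intel-iommu hunk encode the VT-d address-width and level arithmetic. A worked illustration of what they evaluate to (not from the patch; assumes 4 KiB pages and a 48-bit guest address width):

/*
 *   width_to_agaw(48)        = (48 - 30) / 9 = 2
 *   agaw_to_level(2)         = 2 + 2         = 4      four page-table levels
 *   level_to_offset_bits(2)  = (2 - 1) * 9   = 9
 *   level_size(2)            = 1UL << 9      = 512 pages  (2 MiB of address space)
 *   pfn_level_offset(pfn, 2) = (pfn >> 9) & 0x1ff
 */
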
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a366..553d8ee 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@
* the VF BAR size multiplied by the number of VFs. The alignment
* is just the VF BAR size.
*/
-int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
struct resource tmp;
enum pci_bar_type type;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7754a67..6beb11b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -264,7 +264,8 @@
extern void pci_iov_release(struct pci_dev *dev);
extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
-extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+ int resno);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);
@@ -320,7 +321,7 @@
}
#endif /* CONFIG_PCI_IOV */
-static inline int pci_resource_alignment(struct pci_dev *dev,
+static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 54aa1c2..9ba4dad 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -163,7 +163,7 @@
c = p_dev->function_config;
if (!(c->state & CONFIG_LOCKED)) {
- dev_dbg(&s->dev, "Configuration isnt't locked\n");
+ dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
mutex_unlock(&s->ops_mutex);
return -EACCES;
}
@@ -220,7 +220,7 @@
s->win[w].card_start = offset;
ret = s->ops->set_mem_map(s, &s->win[w]);
if (ret)
- dev_warn(&s->dev, "failed to set_mem_map\n");
+ dev_warn(&p_dev->dev, "failed to set_mem_map\n");
mutex_unlock(&s->ops_mutex);
return ret;
} /* pcmcia_map_mem_page */
@@ -244,18 +244,18 @@
c = p_dev->function_config;
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
+ dev_dbg(&p_dev->dev, "No card present\n");
ret = -ENODEV;
goto unlock;
}
if (!(c->state & CONFIG_LOCKED)) {
- dev_dbg(&s->dev, "Configuration isnt't locked\n");
+ dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
ret = -EACCES;
goto unlock;
}
if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
- dev_dbg(&s->dev,
+ dev_dbg(&p_dev->dev,
"changing Vcc or IRQ is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
@@ -265,20 +265,22 @@
if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
(mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
if (mod->Vpp1 != mod->Vpp2) {
- dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
+ dev_dbg(&p_dev->dev,
+ "Vpp1 and Vpp2 must be the same\n");
ret = -EINVAL;
goto unlock;
}
s->socket.Vpp = mod->Vpp1;
if (s->ops->set_socket(s, &s->socket)) {
- dev_printk(KERN_WARNING, &s->dev,
+ dev_printk(KERN_WARNING, &p_dev->dev,
"Unable to set VPP\n");
ret = -EIO;
goto unlock;
}
} else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
(mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
- dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
+ dev_dbg(&p_dev->dev,
+ "changing Vcc is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
}
@@ -401,7 +403,7 @@
win = &s->win[w];
if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
- dev_dbg(&s->dev, "not releasing unknown window\n");
+ dev_dbg(&p_dev->dev, "not releasing unknown window\n");
mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
@@ -439,7 +441,7 @@
return -ENODEV;
if (req->IntType & INT_CARDBUS) {
- dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n");
+ dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
return -EINVAL;
}
@@ -447,7 +449,7 @@
c = p_dev->function_config;
if (c->state & CONFIG_LOCKED) {
mutex_unlock(&s->ops_mutex);
- dev_dbg(&s->dev, "Configuration is locked\n");
+ dev_dbg(&p_dev->dev, "Configuration is locked\n");
return -EACCES;
}
@@ -455,7 +457,7 @@
s->socket.Vpp = req->Vpp;
if (s->ops->set_socket(s, &s->socket)) {
mutex_unlock(&s->ops_mutex);
- dev_printk(KERN_WARNING, &s->dev,
+ dev_printk(KERN_WARNING, &p_dev->dev,
"Unable to set socket state\n");
return -EINVAL;
}
@@ -569,19 +571,20 @@
int ret = -EINVAL;
mutex_lock(&s->ops_mutex);
- dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]);
+ dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
+ &c->io[0], &c->io[1]);
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "pcmcia_request_io: No card present\n");
+ dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
goto out;
}
if (c->state & CONFIG_LOCKED) {
- dev_dbg(&s->dev, "Configuration is locked\n");
+ dev_dbg(&p_dev->dev, "Configuration is locked\n");
goto out;
}
if (c->state & CONFIG_IO_REQ) {
- dev_dbg(&s->dev, "IO already configured\n");
+ dev_dbg(&p_dev->dev, "IO already configured\n");
goto out;
}
@@ -592,7 +595,13 @@
if (c->io[1].end) {
ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
if (ret) {
+ struct resource tmp = c->io[0];
+ /* release the previously allocated resource */
release_io_space(s, &c->io[0]);
+ /* but preserve the settings, for they worked... */
+ c->io[0].end = resource_size(&tmp);
+ c->io[0].start = tmp.start;
+ c->io[0].flags = tmp.flags;
goto out;
}
} else
@@ -601,7 +610,7 @@
c->state |= CONFIG_IO_REQ;
p_dev->_io = 1;
- dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR",
+ dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
&c->io[0], &c->io[1]);
out:
mutex_unlock(&s->ops_mutex);
@@ -800,7 +809,7 @@
int w;
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
+ dev_dbg(&p_dev->dev, "No card present\n");
return -ENODEV;
}
@@ -809,12 +818,12 @@
req->Size = s->map_size;
align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
if (req->Size & (s->map_size-1)) {
- dev_dbg(&s->dev, "invalid map size\n");
+ dev_dbg(&p_dev->dev, "invalid map size\n");
return -EINVAL;
}
if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
(req->Base & (align-1))) {
- dev_dbg(&s->dev, "invalid base address\n");
+ dev_dbg(&p_dev->dev, "invalid base address\n");
return -EINVAL;
}
if (req->Base)
@@ -826,7 +835,7 @@
if (!(s->state & SOCKET_WIN_REQ(w)))
break;
if (w == MAX_WIN) {
- dev_dbg(&s->dev, "all windows are used already\n");
+ dev_dbg(&p_dev->dev, "all windows are used already\n");
mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
@@ -837,7 +846,7 @@
win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
0, s);
if (!win->res) {
- dev_dbg(&s->dev, "allocating mem region failed\n");
+ dev_dbg(&p_dev->dev, "allocating mem region failed\n");
mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
@@ -851,7 +860,7 @@
win->card_start = 0;
if (s->ops->set_mem_map(s, win) != 0) {
- dev_dbg(&s->dev, "failed to set memory mapping\n");
+ dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
mutex_unlock(&s->ops_mutex);
return -EIO;
}
@@ -874,7 +883,7 @@
if (win->res)
request_resource(&iomem_resource, res);
- dev_dbg(&s->dev, "request_window results in %pR\n", res);
+ dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
mutex_unlock(&s->ops_mutex);
*wh = res;
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index b8a869a..deef665 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -646,7 +646,7 @@
if (!pci_resource_start(dev, 0)) {
dev_warn(&dev->dev, "refusing to load the driver as the "
"io_base is NULL.\n");
- goto err_out_free_mem;
+ goto err_out_disable;
}
dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e35ed12..2d61186 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3093,7 +3093,8 @@
TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
};
-typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+typedef u16 tpacpi_keymap_entry_t;
+typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
@@ -3230,7 +3231,7 @@
};
#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
-#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0])
+#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
int res, i;
int status;
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 936bae5..dc628cb 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@
empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+ break;
case SOURCE_VOLTAGE:
full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
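
The apm_power hunk above adds the missing break before the SOURCE_VOLTAGE case: without it, the property selections made for the preceding case fell straight through and were silently overwritten by the voltage ones. A minimal userspace sketch of that fall-through follows; the preceding case label is not visible in the hunk, so SOURCE_ENERGY and the property names here are illustrative only.

#include <stdio.h>

enum source { SOURCE_ENERGY, SOURCE_VOLTAGE };

/* Without the "break" the SOURCE_ENERGY selection falls straight into
 * the SOURCE_VOLTAGE branch and is silently overwritten. */
static const char *pick_full_prop(enum source src)
{
	const char *full = "unset";

	switch (src) {
	case SOURCE_ENERGY:
		full = "ENERGY_FULL";
		break;	/* the line the patch adds; drop it to see the bug */
	case SOURCE_VOLTAGE:
		full = "VOLTAGE_MAX";
		break;
	}
	return full;
}

int main(void)
{
	printf("SOURCE_ENERGY  -> %s\n", pick_full_prop(SOURCE_ENERGY));
	printf("SOURCE_VOLTAGE -> %s\n", pick_full_prop(SOURCE_VOLTAGE));
	return 0;
}
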
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index c61ffec..2a10cd3 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -185,8 +185,8 @@
{
u32 data[3];
u8 *p = (u8 *)&data[1];
- int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY,
- IPCMSG_BATTERY, NULL, 0, data, 3);
+ int err = intel_scu_ipc_command(IPCMSG_BATTERY,
+ IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
prop->capacity = data[0];
prop->crnt = *p++;
@@ -207,7 +207,7 @@
static int pmic_scu_ipc_set_charger(int charger)
{
- return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY);
+ return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
}
/**
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 7d149a8..2ce2eb7 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -215,7 +215,7 @@
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
int ret = -EINVAL;
- if (info->vol_table && (index < (2 << info->vol_nbits))) {
+ if (info->vol_table && (index < (1 << info->vol_nbits))) {
ret = info->vol_table[index];
if (info->slope_double)
ret <<= 1;
@@ -233,7 +233,7 @@
max_uV = max_uV >> 1;
}
if (info->vol_table) {
- for (i = 0; i < (2 << info->vol_nbits); i++) {
+ for (i = 0; i < (1 << info->vol_nbits); i++) {
if (!info->vol_table[i])
break;
if ((min_uV <= info->vol_table[i])
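
The two 88pm8607 changes above are the same off-by-a-factor-of-two fix: an n-bit selector field encodes 1 << n distinct values (indices 0 through (1 << n) - 1), while 2 << n is twice that, so both the index check and the lookup loop could run past the end of vol_table. A small standalone illustration, using an arbitrary 3-bit field:

#include <stdio.h>

/* A selector stored in n bits takes exactly (1 << n) values, indices
 * 0 .. (1 << n) - 1.  "2 << n" is twice that and walks off the end of
 * a table sized for the field. */
int main(void)
{
	unsigned int nbits = 3;				/* hypothetical 3-bit selector */
	unsigned int valid_levels = 1u << nbits;	/* 8  */
	unsigned int old_bound    = 2u << nbits;	/* 16 */

	printf("table entries: %u, old loop bound: %u\n",
	       valid_levels, old_bound);
	return 0;
}
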
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 1179099..b349266 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,12 +634,9 @@
"%s: failed to register regulator %s err %d\n",
__func__, ab3100_regulator_desc[i].name,
err);
- i--;
/* remove the already registered regulators */
- while (i > 0) {
+ while (--i >= 0)
regulator_unregister(ab3100_regulators[i].rdev);
- i--;
- }
return err;
}
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index dc3f1a4..28c7ae6 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -157,7 +157,7 @@
if (info->fixed_uV)
return info->fixed_uV;
- if (selector > info->voltages_len)
+ if (selector >= info->voltages_len)
return -EINVAL;
return info->supported_voltages[selector];
@@ -344,13 +344,14 @@
static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev);
+ struct ab8500_platform_data *pdata;
int i, err;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
return -EINVAL;
}
+ pdata = dev_get_platdata(ab8500->dev);
/* register all regulators */
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -368,11 +369,9 @@
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
/* when we fail, un-register all earlier regulators */
- i--;
- while (i > 0) {
+ while (--i >= 0) {
info = &ab8500_regulator_info[i];
regulator_unregister(info->regulator);
- i--;
}
return err;
}
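
Both regulator probe paths above had the same unwind bug: after a failure at index i, the old "i--; while (i > 0)" loop never unregistered entry 0. The replacement "while (--i >= 0)" visits exactly the entries 0 through i-1 that were registered before the failure. A runnable sketch of the idiom, with stand-in register/unregister helpers and a failure forced at index 2:

#include <stdio.h>

#define NUM_REGS 4

static int register_one(int i)
{
	printf("register   %d\n", i);
	return (i == 2) ? -1 : 0;	/* simulate a failure at index 2 */
}

static void unregister_one(int i)
{
	printf("unregister %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_REGS; i++) {
		if (register_one(i) < 0) {
			/* tear down everything in 0 .. i-1, including 0 */
			while (--i >= 0)
				unregister_one(i);
			return 1;
		}
	}
	return 0;
}
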
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index d59d2f2..df1fb53 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -25,7 +25,7 @@
unsigned int current_level;
unsigned int current_mask;
unsigned int current_offset;
- struct regulator_dev rdev;
+ struct regulator_dev *rdev;
};
static int ad5398_calc_current(struct ad5398_chip_info *chip,
@@ -211,7 +211,6 @@
static int __devinit ad5398_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct regulator_dev *rdev;
struct regulator_init_data *init_data = client->dev.platform_data;
struct ad5398_chip_info *chip;
const struct ad5398_current_data_format *df =
@@ -233,9 +232,10 @@
chip->current_offset = df->current_offset;
chip->current_mask = (chip->current_level - 1) << chip->current_offset;
- rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
+ chip->rdev = regulator_register(&ad5398_reg, &client->dev,
+ init_data, chip);
+ if (IS_ERR(chip->rdev)) {
+ ret = PTR_ERR(chip->rdev);
dev_err(&client->dev, "failed to register %s %s\n",
id->name, ad5398_reg.name);
goto err;
@@ -254,7 +254,7 @@
{
struct ad5398_chip_info *chip = i2c_get_clientdata(client);
- regulator_unregister(&chip->rdev);
+ regulator_unregister(chip->rdev);
kfree(chip);
i2c_set_clientdata(client, NULL);
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e49d2bd..d61ecb8 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -165,7 +165,7 @@
mutex_init(&pmic->mtx);
for (i = 0; i < 3; i++) {
- pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev,
+ pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
init_data, pmic);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 8867c27..559cfa2 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -121,14 +121,14 @@
if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
return -EINVAL;
- if (min_uV >= 3000000)
- selector = 3;
- if (min_uV < 3000000)
- selector = 2;
- if (min_uV < 2500000)
- selector = 1;
if (min_uV < 1800000)
selector = 0;
+ else if (min_uV < 2500000)
+ selector = 1;
+ else if (min_uV < 3000000)
+ selector = 2;
+ else if (min_uV >= 3000000)
+ selector = 3;
if (max1586_v6_calc_voltage(selector) > max_uV)
return -EINVAL;
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index ab67298..a1baf1f 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -549,7 +549,7 @@
if (!max8998)
return -ENOMEM;
- size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1);
+ size = sizeof(struct regulator_dev *) * pdata->num_regulators;
max8998->rdev = kzalloc(size, GFP_KERNEL);
if (!max8998->rdev) {
kfree(max8998);
@@ -557,7 +557,9 @@
}
rdev = max8998->rdev;
+ max8998->dev = &pdev->dev;
max8998->iodev = iodev;
+ max8998->num_regulators = pdata->num_regulators;
platform_set_drvdata(pdev, max8998);
for (i = 0; i < pdata->num_regulators; i++) {
@@ -583,7 +585,7 @@
return 0;
err:
- for (i = 0; i <= max8998->num_regulators; i++)
+ for (i = 0; i < max8998->num_regulators; i++)
if (rdev[i])
regulator_unregister(rdev[i]);
@@ -599,7 +601,7 @@
struct regulator_dev **rdev = max8998->rdev;
int i;
- for (i = 0; i <= max8998->num_regulators; i++)
+ for (i = 0; i < max8998->num_regulators; i++)
if (rdev[i])
regulator_unregister(rdev[i]);
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index c239f42..020f587 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -626,12 +626,6 @@
return error;
}
-/**
- * tps6507x_remove - TPS6507x driver i2c remove handler
- * @client: i2c driver client device structure
- *
- * Unregister TPS driver as an i2c client device driver
- */
static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 8cff141..51237fb 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -133,7 +133,7 @@
mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
val = (val & mask) >> ri->volt_shift;
- if (val > ri->desc.n_voltages)
+ if (val >= ri->desc.n_voltages)
BUG();
return ri->voltages[val] * 1000;
@@ -150,7 +150,7 @@
if (ret)
return ret;
- return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit);
+ return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
}
static int tps6586x_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e686cdb..9edf8f6 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -215,8 +215,7 @@
case REGULATOR_MODE_IDLE:
ret = wm831x_set_bits(wm831x, ctrl_reg,
- WM831X_LDO1_LP_MODE,
- WM831X_LDO1_LP_MODE);
+ WM831X_LDO1_LP_MODE, 0);
if (ret < 0)
return ret;
@@ -225,10 +224,12 @@
WM831X_LDO1_ON_MODE);
if (ret < 0)
return ret;
+ break;
case REGULATOR_MODE_STANDBY:
ret = wm831x_set_bits(wm831x, ctrl_reg,
- WM831X_LDO1_LP_MODE, 0);
+ WM831X_LDO1_LP_MODE,
+ WM831X_LDO1_LP_MODE);
if (ret < 0)
return ret;
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 0e6ed7d..fe4b8a8 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1129,7 +1129,7 @@
mode = REGULATOR_MODE_NORMAL;
} else if (!active && !sleep)
mode = REGULATOR_MODE_IDLE;
- else if (!sleep)
+ else if (sleep)
mode = REGULATOR_MODE_STANDBY;
return mode;
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index d26780e..261a07e 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -235,6 +235,7 @@
err = PTR_ERR(rtc);
return err;
}
+ platform_set_drvdata(pdev, rtc);
return 0;
}
@@ -244,6 +245,7 @@
struct rtc_device *rtc = platform_get_drvdata(pdev);
rtc_device_unregister(rtc);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 72b2bcc..d4fb82d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -426,7 +426,7 @@
enable_irq_wake(IRQ_RTC);
bfin_rtc_sync_pending(&pdev->dev);
} else
- bfin_rtc_int_clear(-1);
+ bfin_rtc_int_clear(0);
return 0;
}
@@ -435,8 +435,17 @@
{
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(IRQ_RTC);
- else
- bfin_write_RTC_ISTAT(-1);
+
+ /*
+ * Since only some of the RTC bits are maintained externally in the
+ * Vbat domain, we need to wait for the RTC MMRs to be synced into
+ * the core after waking up. This happens every RTC 1HZ. Once that
+ * has happened, we can go ahead and re-enable the important write
+ * complete interrupt event.
+ */
+ while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+ continue;
+ bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
return 0;
}
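
The resume path above replaces a blanket ISTAT write with the wait the comment describes: poll until the externally maintained RTC state has been synced back into the core (the 1 Hz seconds event), then re-enable the write-complete interrupt. A userspace sketch of that shape; the bit values and accessor names are stand-ins, not the Blackfin definitions.

#include <stdio.h>

#define RTC_ISTAT_SEC            0x0010	/* made-up bit values */
#define RTC_ISTAT_WRITE_COMPLETE 0x8000

static int polls;

/* Pretend the 1 Hz seconds event shows up on the third poll. */
static unsigned int read_istat(void)
{
	return (++polls >= 3) ? RTC_ISTAT_SEC : 0;
}

static void enable_irq_bit(unsigned int bit)
{
	printf("re-enabled 0x%04x after %d polls\n", bit, polls);
}

int main(void)
{
	/* wait for the externally maintained state to be synced back */
	while (!(read_istat() & RTC_ISTAT_SEC))
		continue;
	enable_irq_bit(RTC_ISTAT_WRITE_COMPLETE);
	return 0;
}
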
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 66377f3..d60557c 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -364,7 +364,7 @@
t->time.tm_isdst = -1;
t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
- return rtc_valid_tm(t);
+ return 0;
}
static struct rtc_class_ops m41t80_rtc_ops = {
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 6c418fe..b7a6690 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -403,7 +403,7 @@
}
if (request_irq(adev->irq[0], pl031_interrupt,
- IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+ IRQF_DISABLED, "rtc-pl031", ldata)) {
ret = -EIO;
goto out_no_irq;
}
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a0d3ec8..f57a87f 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -310,11 +310,6 @@
s3c_rtc_setaie(alrm->enabled);
- if (alrm->enabled)
- enable_irq_wake(s3c_rtc_alarmno);
- else
- disable_irq_wake(s3c_rtc_alarmno);
-
return 0;
}
@@ -587,6 +582,10 @@
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
}
s3c_rtc_enable(pdev, 0);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(s3c_rtc_alarmno);
+
return 0;
}
@@ -600,6 +599,10 @@
tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
}
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(s3c_rtc_alarmno);
+
return 0;
}
#else
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index b7de025..85cf607 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -217,8 +217,7 @@
if (!blkdat->request_queue)
return -ENOMEM;
- elevator_exit(blkdat->request_queue->elevator);
- rc = elevator_init(blkdat->request_queue, "noop");
+ rc = elevator_change(blkdat->request_queue, "noop");
if (rc)
goto cleanup_queue;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 6edf20b..2c7d2d9 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1154,7 +1154,7 @@
dev_fsm, dev_fsm_len, GFP_KERNEL);
if (priv->fsm == NULL) {
CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
- kfree(dev);
+ free_netdev(dev);
return NULL;
}
fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1165,7 +1165,7 @@
grp = ctcmpc_init_mpc_group(priv);
if (grp == NULL) {
MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
- kfree(dev);
+ free_netdev(dev);
return NULL;
}
tasklet_init(&grp->mpc_tasklet2,
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 7d4d227..7f11f3e 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -300,8 +300,7 @@
enum iscsi_host_param param, char *buf)
{
struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
- int len = 0;
- int status;
+ int status = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
switch (param) {
@@ -315,7 +314,7 @@
default:
return iscsi_host_get_param(shost, param, buf);
}
- return len;
+ return status;
}
int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 26350e4..877324f 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -368,7 +368,7 @@
memset(req, 0, sizeof(*req));
wrb->tag0 |= tag;
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
sizeof(*req));
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index cd05e04..d0c8234 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1404,13 +1404,13 @@
{
struct scsi_sense_hdr sshdr;
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
scsi_show_sense_hdr(&sshdr);
scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
}
EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@
void scsi_print_result(struct scsi_cmnd *cmd)
{
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_show_result(cmd->result);
}
EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f5551b..c5d0606 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3231,6 +3231,12 @@
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+ /* The doorbell reset seems to cause lockups on some Smart
+ * Arrays (e.g. P410, P410i, maybe others). Until this is
+ * fixed or at least isolated, avoid the doorbell reset.
+ */
+ use_doorbell = 0;
+
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index fda4de3..e88bbdd 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -865,7 +865,7 @@
{
_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
WARN_ON(or->in.bio || or->in.total_bytes);
- WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
+ WARN_ON(bio->bi_rw & REQ_WRITE);
or->in.bio = bio;
or->in.total_bytes = len;
}
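
The osd_initiator change above drops the comparison against the literal 1: testing a masked flag word for equality with 1 only behaves as intended if the flag happens to occupy bit 0, whereas testing it for non-zero works for any bit position. A tiny illustration with a made-up flag value:

#include <stdio.h>

#define REQ_WRITE (1u << 3)		/* hypothetical flag bit */

int main(void)
{
	unsigned int rw = REQ_WRITE;	/* a request marked as a write */

	/* equality with 1 misses the flag unless it is bit 0 */
	printf("1 == (rw & REQ_WRITE): %d\n", 1 == (rw & REQ_WRITE));
	/* plain non-zero test detects it regardless of position */
	printf("     rw & REQ_WRITE  : %d\n", (rw & REQ_WRITE) != 0);
	return 0;
}
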
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 420238c..114bc5a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1838,26 +1838,33 @@
qla24xx_disable_vp(vha);
+ vha->flags.delete_progress = 1;
+
fc_remove_host(vha->host);
scsi_remove_host(vha->host);
- qla2x00_free_fcports(vha);
+ if (vha->timer_active) {
+ qla2x00_vp_stop_timer(vha);
+ DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
+ " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+ }
qla24xx_deallocate_vp_id(vha);
+ /* No pending activity should remain on the vha at this point */
+ DEBUG(msleep(random32()%10)); /* brief random delay so that any
+ * straggler trips the BUG_ON below */
+
+ BUG_ON(atomic_read(&vha->vref_count));
+
+ qla2x00_free_fcports(vha);
+
mutex_lock(&ha->vport_lock);
ha->cur_vport_count--;
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
- if (vha->timer_active) {
- qla2x00_vp_stop_timer(vha);
- DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
- "has stopped\n",
- vha->host_no, vha->vp_idx, vha));
- }
-
if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 6cfc28a..b74e6b5 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -29,8 +29,6 @@
/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
-/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
-
/*
* Macros use for debugging the driver.
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3a432ea..d2a4e15 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2641,6 +2641,7 @@
#define MBX_UPDATE_FLASH_ACTIVE 3
struct mutex vport_lock; /* Virtual port synchronization */
+ spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
@@ -2828,6 +2829,7 @@
uint32_t management_server_logged_in :1;
uint32_t process_response_queue :1;
uint32_t difdix_supported:1;
+ uint32_t delete_progress:1;
} flags;
atomic_t loop_state;
@@ -2922,6 +2924,8 @@
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
+
+ atomic_t vref_count;
} scsi_qla_host_t;
/*
@@ -2932,6 +2936,22 @@
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
atomic_read(&ha->loop_state) == LOOP_DOWN)
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+ atomic_inc(&__vha->vref_count); \
+ mb(); \
+ if (__vha->flags.delete_progress) { \
+ atomic_dec(&__vha->vref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
+ atomic_dec(&__vha->vref_count); \
+} while (0)
+
+
#define qla_printk(level, ha, format, arg...) \
dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
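
QLA_VHA_MARK_BUSY()/QLA_VHA_MARK_NOT_BUSY() added above implement a small drain protocol: a user bumps vref_count before touching a vport and backs off if delete_progress is already set, while the deletion path sets the flag first and then waits for the count to reach zero. A single-threaded userspace miniature of the same protocol; the structure and helper names are illustrative, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct vport {
	atomic_int  vref_count;
	atomic_bool delete_progress;
};

/* Mark the vport busy; bail out if deletion is already in progress. */
static bool vport_mark_busy(struct vport *vp)
{
	atomic_fetch_add(&vp->vref_count, 1);
	if (atomic_load(&vp->delete_progress)) {
		atomic_fetch_sub(&vp->vref_count, 1);
		return false;
	}
	return true;
}

static void vport_mark_not_busy(struct vport *vp)
{
	atomic_fetch_sub(&vp->vref_count, 1);
}

/* Deletion path: set the flag, then wait for outstanding users. */
static void vport_delete(struct vport *vp)
{
	atomic_store(&vp->delete_progress, true);
	while (atomic_load(&vp->vref_count))
		;	/* the driver msleep()s here instead of spinning */
	printf("no users left, safe to tear down\n");
}

int main(void)
{
	struct vport vp;

	atomic_init(&vp.vref_count, 0);
	atomic_init(&vp.delete_progress, false);

	if (vport_mark_busy(&vp)) {
		/* ... touch the vport ... */
		vport_mark_not_busy(&vp);
	}
	vport_delete(&vp);
	return 0;
}
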
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d863ed2..9c383ba 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -69,21 +69,29 @@
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->fcport->vha;
del_timer_sync(&iocb->timer);
kfree(iocb);
kfree(ctx);
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+
+ QLA_VHA_MARK_NOT_BUSY(vha);
}
inline srb_t *
qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
unsigned long tmo)
{
- srb_t *sp;
+ srb_t *sp = NULL;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
struct srb_iocb *iocb;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (bail)
+ return NULL;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
@@ -116,6 +124,8 @@
iocb->timer.function = qla2x00_ctx_sp_timeout;
add_timer(&iocb->timer);
done:
+ if (!sp)
+ QLA_VHA_MARK_NOT_BUSY(vha);
return sp;
}
@@ -1777,11 +1787,15 @@
qla2x00_init_response_q_entries(rsp);
}
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Clear RSCN queue. */
list_for_each_entry(vp, &ha->vp_list, list) {
vp->rscn_in_ptr = 0;
vp->rscn_out_ptr = 0;
}
+
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
ha->isp_ops->config_rings(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@
/* Bypass virtual ports of the same host. */
found = 0;
if (ha->num_vhosts) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (new_fcport->d_id.b24 == vp->d_id.b24) {
found = 1;
break;
}
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (found)
continue;
}
@@ -3343,6 +3362,7 @@
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
struct scsi_qla_host *tvp;
+ unsigned long flags = 0;
rval = QLA_SUCCESS;
@@ -3367,6 +3387,8 @@
/* Check for loop ID being already in use. */
found = 0;
fcport = NULL;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
list_for_each_entry(fcport, &vp->vp_fcports, list) {
if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@
if (found)
break;
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
/* If not in use then it is free to use. */
if (!found) {
@@ -3791,14 +3814,27 @@
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
fc_port_t *fcport;
- struct scsi_qla_host *tvp, *vha;
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = base_vha->hw;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Go with deferred removal of rport references. */
- list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
- list_for_each_entry(fcport, &vha->vp_fcports, list)
+ list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+ atomic_inc(&vha->vref_count);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport && fcport->drport &&
- atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+ atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_rport_del(fcport);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ }
+ atomic_dec(&vha->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
void
@@ -3806,7 +3842,7 @@
{
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
- struct scsi_qla_host *tvp;
+ unsigned long flags;
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha, 0);
- list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_mark_all_devices_lost(vp, 0);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@
uint8_t status = 0;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
- struct scsi_qla_host *tvp;
struct req_que *req = ha->req_q_map[0];
+ unsigned long flags;
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@
DEBUG(printk(KERN_INFO
"qla2x00_abort_isp(%ld): succeeded.\n",
vha->host_no));
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
} else {
qla_printk(KERN_INFO, ha,
"qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
struct scsi_qla_host *vp;
- struct scsi_qla_host *tvp;
+ unsigned long flags;
status = qla2x00_init_rings(vha);
if (!status) {
@@ -5272,10 +5329,21 @@
DEBUG(printk(KERN_INFO
"qla82xx_restart_isp(%ld): succeeded.\n",
vha->host_no));
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
} else {
qla_printk(KERN_INFO, ha,
"qla82xx_restart_isp: **** FAILED ****\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6982ba7..28f65be 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1706,19 +1706,20 @@
cp->result = DID_ERROR << 16;
break;
}
- } else if (!lscsi_status) {
+ } else {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", vha->host_no, cp->device->id,
cp->device->lun, resid, scsi_bufflen(cp)));
- cp->result = DID_ERROR << 16;
- break;
+ cp->result = DID_ERROR << 16 | lscsi_status;
+ goto check_scsi_status;
}
cp->result = DID_OK << 16 | lscsi_status;
logit = 0;
+check_scsi_status:
/*
* Check to see if SCSI Status is non zero. If so report SCSI
* Status.
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 6009b0c..a595ec8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2913,7 +2913,7 @@
uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
- scsi_qla_host_t *tvp;
+ unsigned long flags;
if (rptid_entry->entry_status != 0)
return;
@@ -2945,9 +2945,12 @@
return;
}
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list)
if (vp_idx == vp->vp_idx)
break;
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (!vp)
return;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 987c5b0..2b69392 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -30,6 +30,7 @@
{
uint32_t vp_id;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
/* Find an empty slot and assign an vp_id */
mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
vha->vp_idx = vp_id;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
mutex_unlock(&ha->vport_lock);
return vp_id;
}
@@ -54,12 +59,31 @@
{
uint16_t vp_id;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
mutex_lock(&ha->vport_lock);
+ /*
+ * Wait for all pending activities to finish before removing vport from
+ * the list.
+ * Lock needs to be held for safe removal from the list (it
+ * ensures no active vp_list traversal while the vport is removed
+ * from the queue)
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ while (atomic_read(&vha->vref_count)) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ msleep(500);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ list_del(&vha->list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
vp_id = vha->vp_idx;
ha->num_vhosts--;
clear_bit(vp_id, ha->vp_idx_map);
- list_del(&vha->list);
+
mutex_unlock(&ha->vport_lock);
}
@@ -68,12 +92,17 @@
{
scsi_qla_host_t *vha;
struct scsi_qla_host *tvha;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Locate matching device in database. */
list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
- if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+ if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
return vha;
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
return NULL;
}
@@ -93,6 +122,12 @@
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
+ /*
+ * !!! NOTE !!!
+ * If this function is called in contexts other than vp create,
+ * disable or delete, make sure it is synchronized with the
+ * delete thread.
+ */
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@
"loop_id=0x%04x :%x\n",
vha->host_no, fcport->loop_id, fcport->vp_idx));
- atomic_set(&fcport->state, FCS_DEVICE_DEAD);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
atomic_set(&fcport->state, FCS_UNCONFIGURED);
}
@@ -194,12 +228,17 @@
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
- scsi_qla_host_t *vha, *tvha;
+ scsi_qla_host_t *vha;
struct qla_hw_data *ha = rsp->hw;
int i = 0;
+ unsigned long flags;
- list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vha, &ha->vp_list, list) {
if (vha->vp_idx) {
+ atomic_inc(&vha->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
switch (mb[0]) {
case MBA_LIP_OCCURRED:
case MBA_LOOP_UP:
@@ -215,9 +254,13 @@
qla2x00_async_event(vha, rsp, mb);
break;
}
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vha->vref_count);
}
i++;
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
@@ -297,7 +340,7 @@
int ret;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
- struct scsi_qla_host *tvp;
+ unsigned long flags = 0;
if (vha->vp_idx)
return;
@@ -309,10 +352,19 @@
if (!(ha->current_topology & ISP_CFG_F))
return;
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
ret = qla2x00_do_dpc_vp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 915b77a..0a71cc7 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2672,6 +2672,19 @@
sufficient_dsds:
req_cnt = 1;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!sp->ctx) {
DEBUG(printk(KERN_INFO
@@ -3307,16 +3320,19 @@
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
qla2xxx_wake_dpc(vha);
+ ha->flags.fw_hung = 1;
if (ha->flags.mbox_busy) {
- ha->flags.fw_hung = 1;
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to fw hung, doing premature "
- "completion of mbx command\n"));
- complete(&ha->mbx_intr_comp);
+ "Due to fw hung, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
}
}
- }
+ } else
+ vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
}
@@ -3418,13 +3434,15 @@
"%s(): Adapter reset needed!\n", __func__);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ ha->flags.fw_hung = 1;
if (ha->flags.mbox_busy) {
- ha->flags.fw_hung = 1;
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
- "Need reset, doing premature "
- "completion of mbx command\n"));
- complete(&ha->mbx_intr_comp);
+ "Need reset, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
}
} else {
qla82xx_check_fw_alive(vha);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8c80b49..1e4bff6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2341,16 +2341,28 @@
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
- scsi_qla_host_t *base_vha, *vha, *temp;
+ scsi_qla_host_t *base_vha, *vha;
struct qla_hw_data *ha;
+ unsigned long flags;
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
- list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
- if (vha && vha->fc_vport)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vha, &ha->vp_list, list) {
+ atomic_inc(&vha->vref_count);
+
+ if (vha && vha->fc_vport) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
fc_vport_terminate(vha->fc_vport);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+
+ atomic_dec(&vha->vref_count);
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
set_bit(UNLOADING, &base_vha->dpc_flags);
@@ -2975,10 +2987,17 @@
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
{
struct qla_work_evt *e;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (bail)
+ return NULL;
e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
- if (!e)
+ if (!e) {
+ QLA_VHA_MARK_NOT_BUSY(vha);
return NULL;
+ }
INIT_LIST_HEAD(&e->list);
e->type = type;
@@ -3135,6 +3154,9 @@
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
+
+ /* For each work completed decrement vha ref count */
+ QLA_VHA_MARK_NOT_BUSY(vha);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e75ccb9..8edbccb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.03-k0"
+#define QLA2XXX_VERSION "8.03.04-k0"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 3
+#define QLA_DRIVER_PATCH_VER 4
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ade720..ee02d38 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1011,8 +1011,8 @@
err_exit:
scsi_release_buffers(cmd);
- scsi_put_command(cmd);
cmd->request->special = NULL;
+ scsi_put_command(cmd);
return error;
}
EXPORT_SYMBOL(scsi_init_io);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2714bec..ffa0689 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -870,7 +870,7 @@
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
- if (atomic_dec_return(&sdkp->openers) && sdev->removable) {
+ if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
if (scsi_block_when_processing_errors(sdev))
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
@@ -2625,15 +2625,15 @@
static void sd_print_sense_hdr(struct scsi_disk *sdkp,
struct scsi_sense_hdr *sshdr)
{
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_sense_hdr(sshdr);
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
}
static void sd_print_result(struct scsi_disk *sdkp, int result)
{
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_result(result);
}
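
The sd_release() change above fixes an inverted last-close test: atomic_dec_return() yields the value after the decrement, so medium removal should be re-allowed when that value is zero (the last opener has gone away), not whenever it is still non-zero. A small sketch of the counting logic:

#include <stdio.h>

int main(void)
{
	int openers = 2;		/* two concurrent opens */

	while (openers > 0) {
		int remaining = --openers;	/* atomic_dec_return() analogue */

		if (remaining == 0)
			printf("last close: allow medium removal\n");
		else
			printf("still %d opener(s): keep the medium locked\n",
			       remaining);
	}
	return 0;
}
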
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a7bc8b7..2c3e89d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -72,10 +72,7 @@
static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
{
- if (label)
- sym_print_addr(cp->cmd, "%s: ", label);
- else
- sym_print_addr(cp->cmd, "");
+ sym_print_addr(cp->cmd, "%s: ", label);
spi_print_msg(msg);
printf("\n");
@@ -4558,7 +4555,8 @@
switch (np->msgin [2]) {
case M_X_MODIFY_DP:
if (DEBUG_FLAGS & DEBUG_POINTER)
- sym_print_msg(cp, NULL, np->msgin);
+ sym_print_msg(cp, "extended msg ",
+ np->msgin);
tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
(np->msgin[5]<<8) + (np->msgin[6]);
sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@
*/
case M_IGN_RESIDUE:
if (DEBUG_FLAGS & DEBUG_POINTER)
- sym_print_msg(cp, NULL, np->msgin);
+ sym_print_msg(cp, "1 or 2 byte ", np->msgin);
if (cp->host_flags & HF_SENSE)
OUTL_DSP(np, SCRIPTA_BA(np, clrack));
else
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 50441ff..2904aa0 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -472,14 +472,9 @@
spin_unlock_irqrestore(&uap->port.lock, flags);
}
-static void pl010_set_ldisc(struct uart_port *port)
+static void pl010_set_ldisc(struct uart_port *port, int new)
{
- int line = port->line;
-
- if (line >= port->state->port.tty->driver->num)
- return;
-
- if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+ if (new == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
pl010_enable_ms(port);
} else
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index bc9af50..324c385 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -1423,7 +1423,6 @@
}
phsu = hsu;
-
hsu_debugfs_init(hsu);
return;
@@ -1435,18 +1434,20 @@
static void serial_hsu_remove(struct pci_dev *pdev)
{
- struct hsu_port *hsu;
- int i;
+ void *priv = pci_get_drvdata(pdev);
+ struct uart_hsu_port *up;
- hsu = pci_get_drvdata(pdev);
- if (!hsu)
+ if (!priv)
return;
- for (i = 0; i < 3; i++)
- uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port);
+ /* For port 0/1/2, priv is the address of uart_hsu_port */
+ if (pdev->device != 0x081E) {
+ up = priv;
+ uart_remove_one_port(&serial_hsu_reg, &up->port);
+ }
pci_set_drvdata(pdev, NULL);
- free_irq(hsu->irq, hsu);
+ free_irq(pdev->irq, priv);
pci_disable_device(pdev);
}
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 8dedb26..c4399e2 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -500,6 +500,7 @@
psc_fifoc = of_iomap(np, 0);
if (!psc_fifoc) {
pr_err("%s: Can't map FIFOC\n", __func__);
+ of_node_put(np);
return -ENODEV;
}
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 141c695..7d475b2 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -335,8 +335,6 @@
info->p_dev = link;
link->priv = info;
- link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
- link->resource[0]->end = 8;
link->conf.Attributes = CONF_ENABLE_IRQ;
if (do_sound) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,6 +409,27 @@
/*====================================================================*/
+static int pfc_config(struct pcmcia_device *p_dev)
+{
+ unsigned int port = 0;
+ struct serial_info *info = p_dev->priv;
+
+ if ((p_dev->resource[1]->end != 0) &&
+ (resource_size(p_dev->resource[1]) == 8)) {
+ port = p_dev->resource[1]->start;
+ info->slave = 1;
+ } else if ((info->manfid == MANFID_OSITECH) &&
+ (resource_size(p_dev->resource[0]) == 0x40)) {
+ port = p_dev->resource[0]->start + 0x28;
+ info->slave = 1;
+ }
+ if (info->slave)
+ return setup_serial(p_dev, info, port, p_dev->irq);
+
+ dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
+ return -ENODEV;
+}
+
static int simple_config_check(struct pcmcia_device *p_dev,
cistpl_cftable_entry_t *cf,
cistpl_cftable_entry_t *dflt,
@@ -461,23 +480,8 @@
struct serial_info *info = link->priv;
int i = -ENODEV, try;
- /* If the card is already configured, look up the port and irq */
- if (link->function_config) {
- unsigned int port = 0;
- if ((link->resource[1]->end != 0) &&
- (resource_size(link->resource[1]) == 8)) {
- port = link->resource[1]->end;
- info->slave = 1;
- } else if ((info->manfid == MANFID_OSITECH) &&
- (resource_size(link->resource[0]) == 0x40)) {
- port = link->resource[0]->start + 0x28;
- info->slave = 1;
- }
- if (info->slave) {
- return setup_serial(link, info, port,
- link->irq);
- }
- }
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 8;
/* First pass: look for a config entry that looks normal.
* Two tries: without IO aliases, then with aliases */
@@ -491,8 +495,7 @@
if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
goto found_port;
- printk(KERN_NOTICE
- "serial_cs: no usable port range found, giving up\n");
+ dev_warn(&link->dev, "no usable port range found, giving up\n");
return -1;
found_port:
@@ -558,6 +561,7 @@
int i, base2 = 0;
/* First, look for a generic full-sized window */
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
link->resource[0]->end = info->multi * 8;
if (pcmcia_loop_config(link, multi_config_check, &base2)) {
/* If that didn't work, look for two windows */
@@ -565,15 +569,14 @@
info->multi = 2;
if (pcmcia_loop_config(link, multi_config_check_notpicky,
&base2)) {
- printk(KERN_NOTICE "serial_cs: no usable port range"
+ dev_warn(&link->dev, "no usable port range "
"found, giving up\n");
return -ENODEV;
}
}
if (!link->irq)
- dev_warn(&link->dev,
- "serial_cs: no usable IRQ found, continuing...\n");
+ dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
/*
* Apply any configuration quirks.
@@ -675,6 +678,7 @@
multifunction cards that ask for appropriate IO port ranges */
if ((info->multi == 0) &&
(link->has_func_id) &&
+ (link->socket->pcmcia_pfc == 0) &&
((link->func_id == CISTPL_FUNCID_MULTI) ||
(link->func_id == CISTPL_FUNCID_SERIAL)))
pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -685,7 +689,13 @@
if (info->quirk && info->quirk->multi != -1)
info->multi = info->quirk->multi;
- if (info->multi > 1)
+ dev_info(&link->dev,
+ "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
+ link->manf_id, link->card_id,
+ link->socket->pcmcia_pfc, info->multi, info->quirk);
+ if (link->socket->pcmcia_pfc)
+ i = pfc_config(link);
+ else if (info->multi > 1)
i = multi_config(link);
else
i = simple_config(link);
@@ -704,7 +714,7 @@
return 0;
failed:
- dev_warn(&link->dev, "serial_cs: failed to initialize\n");
+ dev_warn(&link->dev, "failed to initialize\n");
serial_remove(link);
return -ENODEV;
}
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index acd35d1..4c37c4e2 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -503,8 +503,9 @@
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
- /* This message is completed, so let's turn off the clock! */
+ /* This message is completed, so let's turn off the clocks! */
clk_disable(pl022->clk);
+ amba_pclk_disable(pl022->adev);
}
/**
@@ -1139,9 +1140,10 @@
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
/*
- * We enable the clock here, then the clock will be disabled when
+ * We enable the clocks here, then the clocks will be disabled when
* giveback() is called in each method (poll/interrupt/DMA)
*/
+ amba_pclk_enable(pl022->adev);
clk_enable(pl022->clk);
restore_state(pl022);
flush(pl022);
@@ -1786,11 +1788,9 @@
}
/* Disable SSP */
- clk_enable(pl022->clk);
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
load_ssp_default_config(pl022);
- clk_disable(pl022->clk);
status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
pl022);
@@ -1818,6 +1818,8 @@
goto err_spi_register;
}
dev_dbg(dev, "probe succeded\n");
+ /* Disable the silicon block pclk and clock it when needed */
+ amba_pclk_disable(adev);
return 0;
err_spi_register:
@@ -1879,9 +1881,9 @@
return status;
}
- clk_enable(pl022->clk);
+ amba_pclk_enable(adev);
load_ssp_default_config(pl022);
- clk_disable(pl022->clk);
+ amba_pclk_disable(adev);
dev_dbg(&adev->dev, "suspended\n");
return 0;
}
@@ -1981,7 +1983,7 @@
return amba_driver_register(&pl022_driver);
}
-module_init(pl022_init);
+subsys_initcall(pl022_init);
static void __exit pl022_exit(void)
{
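
The pl022 hunks above bracket each message with both clocks, as the updated comments say: the bus clock (pclk) and the functional clock are enabled just before a message is pumped and released again in giveback(), in reverse order, so they are held only while a transfer is in flight. A trivial userspace sketch of that bracketing; the helpers are stand-ins, not the amba/clk API.

#include <stdio.h>

static int pclk_users, clk_users;

static void pclk_enable(void)  { pclk_users++; }
static void pclk_disable(void) { pclk_users--; }
static void clk_enable(void)   { clk_users++;  }
static void clk_disable(void)  { clk_users--;  }

/* Message finished: release the clocks in reverse order. */
static void giveback(void)
{
	clk_disable();
	pclk_disable();
}

static void pump_one_message(void)
{
	pclk_enable();
	clk_enable();
	/* ... poll / interrupt / DMA transfer happens here ... */
	giveback();
}

int main(void)
{
	pump_one_message();
	printf("pclk users: %d, clk users: %d\n", pclk_users, clk_users);
	return 0;
}
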
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index d256cb0..5624785 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -181,10 +181,6 @@
wait_till_not_busy(dws);
}
-static void null_cs_control(u32 command)
-{
-}
-
static int null_writer(struct dw_spi *dws)
{
u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@
struct spi_transfer,
transfer_list);
- if (!last_transfer->cs_change)
+ if (!last_transfer->cs_change && dws->cs_control)
dws->cs_control(MRST_SPI_DEASSERT);
msg->state = NULL;
@@ -396,6 +392,11 @@
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
struct dw_spi *dws = dev_id;
+ u16 irq_status, irq_mask = 0x3f;
+
+ irq_status = dw_readw(dws, isr) & irq_mask;
+ if (!irq_status)
+ return IRQ_NONE;
if (!dws->cur_msg) {
spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@
*/
if (dws->cs_control) {
if (dws->rx && dws->tx)
- chip->tmode = 0x00;
+ chip->tmode = SPI_TMOD_TR;
else if (dws->rx)
- chip->tmode = 0x02;
+ chip->tmode = SPI_TMOD_RO;
else
- chip->tmode = 0x01;
+ chip->tmode = SPI_TMOD_TO;
- cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+ cr0 &= ~SPI_TMOD_MASK;
cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
}
@@ -699,9 +700,6 @@
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
-
- chip->cs_control = null_cs_control;
- chip->enable_dma = 0;
}
/*
@@ -883,7 +881,7 @@
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
- ret = request_irq(dws->irq, dw_spi_irq, 0,
+ ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
"dw_spi", dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
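
The dw_spi hunks above belong together: once the handler is registered with IRQF_SHARED it is invoked for every interrupt on the line, so it must read its own status register first and return IRQ_NONE when nothing of its own is pending. A userspace sketch of that contract; the 0x3f mask is taken from the hunk, everything else is made up.

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

struct fake_spi {
	unsigned short isr;		/* pretend interrupt status register */
};

static enum irqreturn fake_spi_irq(struct fake_spi *dws)
{
	unsigned short irq_status = dws->isr & 0x3f;

	if (!irq_status)
		return IRQ_NONE;	/* some other device on the shared line */

	dws->isr = 0;			/* ack and handle our own interrupt */
	return IRQ_HANDLED;
}

int main(void)
{
	struct fake_spi quiet = { .isr = 0 };
	struct fake_spi busy  = { .isr = 0x01 };

	printf("quiet controller: %s\n",
	       fake_spi_irq(&quiet) == IRQ_NONE ? "IRQ_NONE" : "IRQ_HANDLED");
	printf("busy controller:  %s\n",
	       fake_spi_irq(&busy)  == IRQ_NONE ? "IRQ_NONE" : "IRQ_HANDLED");
	return 0;
}
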
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a9e5c79..0bcf4c1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -554,11 +554,9 @@
EXPORT_SYMBOL_GPL(spi_register_master);
-static int __unregister(struct device *dev, void *master_dev)
+static int __unregister(struct device *dev, void *null)
{
- /* note: before about 2.6.14-rc1 this would corrupt memory: */
- if (dev != master_dev)
- spi_unregister_device(to_spi_device(dev));
+ spi_unregister_device(to_spi_device(dev));
return 0;
}
@@ -576,8 +574,7 @@
{
int dummy;
- dummy = device_for_each_child(master->dev.parent, &master->dev,
- __unregister);
+ dummy = device_for_each_child(&master->dev, NULL, __unregister);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 9736581..c3038da 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -200,6 +200,9 @@
val = readl(regs + S3C64XX_SPI_STATUS);
} while (TX_FIFO_LVL(val, sci) && loops--);
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+
/* Flush RxFIFO*/
loops = msecs_to_loops(1);
do {
@@ -210,6 +213,9 @@
break;
} while (loops--);
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~S3C64XX_SPI_CH_SW_RST;
writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
- ms += 5; /* some tolerance */
+ ms += 10; /* some tolerance */
if (dma_mode) {
val = msecs_to_jiffies(ms) + 10;
val = wait_for_completion_timeout(&sdd->xfer_completion, val);
} else {
+ u32 status;
val = msecs_to_loops(ms);
do {
- val = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
}
if (!val)
@@ -447,8 +454,8 @@
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
-void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
- int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
{
struct s3c64xx_spi_driver_data *sdd = buf_id;
unsigned long flags;
@@ -467,8 +474,8 @@
spin_unlock_irqrestore(&sdd->lock, flags);
}
-void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
- int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
{
struct s3c64xx_spi_driver_data *sdd = buf_id;
unsigned long flags;
@@ -508,8 +515,9 @@
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->tx_buf != NULL) {
- xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
- xfer->len, DMA_TO_DEVICE);
+ xfer->tx_dma = dma_map_single(dev,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma)) {
dev_err(dev, "dma_map_single Tx failed\n");
xfer->tx_dma = XFER_DMAADDR_INVALID;
@@ -919,6 +927,13 @@
return -ENODEV;
}
+ sci = pdev->dev.platform_data;
+ if (!sci->src_clk_name) {
+ dev_err(&pdev->dev,
+ "Board init must call s3c64xx_spi_set_info()\n");
+ return -EINVAL;
+ }
+
/* Check for availability of necessary resource */
dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@
return -ENOMEM;
}
- sci = pdev->dev.platform_data;
-
platform_set_drvdata(pdev, master);
sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@
{
return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
}
-module_init(s3c64xx_spi_init);
+subsys_initcall(s3c64xx_spi_init);
static void __exit s3c64xx_spi_exit(void)
{
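
The polling-loop change above separates the timeout counter from the register readout: the old loop reused val for both, so the first status read clobbered the countdown and the timeout never worked as intended. A minimal sketch of the fixed loop shape, with read_status() standing in for the FIFO-level read:

#include <stdio.h>

/* Stand-in for readl() on the FIFO level register; the FIFO never
 * fills, so the timeout must fire. */
static unsigned int read_status(void)
{
	return 0;
}

int main(void)
{
	unsigned long val = 1000;	/* timeout budget, in iterations */
	unsigned int status;

	do {
		status = read_status();	/* separate variable: val survives */
	} while (status < 16 && --val);

	if (!val)
		printf("timed out, as expected\n");
	return 0;
}
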
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index baa8b05..6e973a7 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -30,7 +30,6 @@
#include "hash.h"
#include <linux/if_arp.h>
-#include <linux/netfilter_bridge.h>
#define MIN(x, y) ((x) < (y) ? (x) : (y))
@@ -431,11 +430,6 @@
return NOTIFY_DONE;
}
-static int batman_skb_recv_finish(struct sk_buff *skb)
-{
- return NF_ACCEPT;
-}
-
/* receive a packet with the batman ethertype coming on a hard
* interface */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -456,13 +450,6 @@
if (atomic_read(&module_state) != MODULE_ACTIVE)
goto err_free;
- /* if netfilter/ebtables wants to block incoming batman
- * packets then give them a chance to do so here */
- ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
- batman_skb_recv_finish);
- if (ret != 1)
- goto err_out;
-
/* packet should hold at least type and version */
if (unlikely(skb_headlen(skb) < 2))
goto err_free;
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 055edee..da3c82e4 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -29,7 +29,6 @@
#include "vis.h"
#include "aggregation.h"
-#include <linux/netfilter_bridge.h>
static void send_outstanding_bcast_packet(struct work_struct *work);
@@ -92,12 +91,9 @@
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
- * (which is > 0). This will not be treated as an error.
- * Also, if netfilter/ebtables wants to block outgoing batman
- * packets then giving them a chance to do so here */
+ * (which is > 0). This will not be treated as an error. */
- return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
- dev_queue_xmit);
+ return dev_queue_xmit(skb);
send_skb_err:
kfree_skb(skb);
return NET_XMIT_DROP;
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
index 9952579..1b3060e 100644
--- a/drivers/staging/ti-st/st.h
+++ b/drivers/staging/ti-st/st.h
@@ -80,5 +80,4 @@
extern long st_register(struct st_proto_s *);
extern long st_unregister(enum proto_type);
-extern struct platform_device *st_get_plat_device(void);
#endif /* ST_H */
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
index 063c9b1..b85d8bf 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/staging/ti-st/st_core.c
@@ -38,7 +38,6 @@
#include "st_ll.h"
#include "st.h"
-#define VERBOSE
/* strings to be used for rfkill entries and by
* ST Core to be used for sysfs debug entry
*/
@@ -581,7 +580,7 @@
long err = 0;
unsigned long flags = 0;
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
pr_info("%s(%d) ", __func__, new_proto->type);
if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
|| new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@
pr_debug("%s: %d ", __func__, type);
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
if (type < ST_BT || type >= ST_MAX) {
pr_err(" protocol %d not supported", type);
return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@
#endif
long len;
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
if (unlikely(skb == NULL || st_gdata == NULL
|| st_gdata->tty == NULL)) {
pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@
struct st_data_s *st_gdata;
pr_info("%s ", __func__);
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
st_gdata->tty = tty;
tty->disc_data = st_gdata;
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
index e0c32d1..8601320 100644
--- a/drivers/staging/ti-st/st_core.h
+++ b/drivers/staging/ti-st/st_core.h
@@ -117,7 +117,7 @@
void st_core_exit(struct st_data_s *);
/* ask for reference from KIM */
-void st_kim_ref(struct st_data_s **);
+void st_kim_ref(struct st_data_s **, int);
#define GPS_STUB_TEST
#ifdef GPS_STUB_TEST
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
index b4a6c7f..9e99463 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/staging/ti-st/st_kim.c
@@ -72,11 +72,26 @@
PROTO_ENTRY(ST_GPS, "GPS"),
};
+#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
+struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
/**
+ * st_get_plat_device -
+ *	returns a reference to the platform device requested by id.
+ *	As of now only one such device exists (id=0). The id to request
+ *	is known to the context asking for the reference: either
+ *	a. the protocol driver which is registering, or
+ *	b. the tty device which is opened.

+ */
+static struct platform_device *st_get_plat_device(int id)
+{
+ return st_kim_devices[id];
+}
+
+/**
* validate_firmware_response -
* function to return whether the firmware response was proper
* in case of error don't complete so that waiting for proper
@@ -353,7 +368,7 @@
struct kim_data_s *kim_gdata;
pr_info(" %s ", __func__);
- kim_pdev = st_get_plat_device();
+ kim_pdev = st_get_plat_device(0);
kim_gdata = dev_get_drvdata(&kim_pdev->dev);
if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@
* This would enable multiple such platform devices to exist
* on a given platform
*/
-void st_kim_ref(struct st_data_s **core_data)
+void st_kim_ref(struct st_data_s **core_data, int id)
{
struct platform_device *pdev;
struct kim_data_s *kim_gdata;
/* get kim_gdata reference from platform device */
- pdev = st_get_plat_device();
+ pdev = st_get_plat_device(id);
kim_gdata = dev_get_drvdata(&pdev->dev);
*core_data = kim_gdata->core_data;
}
@@ -623,6 +638,7 @@
long *gpios = pdev->dev.platform_data;
struct kim_data_s *kim_gdata;
+ st_kim_devices[pdev->id] = pdev;
kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
if (!kim_gdata) {
pr_err("no mem to allocate");
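The st_kim changes record each probed platform device in a small table indexed by pdev->id, and st_get_plat_device(id) simply reads it back, so both the registering protocol driver and the opened tty can name the instance they want. A minimal sketch of this register-then-look-up-by-id pattern, with hypothetical example_* names:

/* Sketch only; all names are hypothetical. */
#include <linux/errno.h>
#include <linux/platform_device.h>

#define MAX_EXAMPLE_DEVICES	3
static struct platform_device *example_devices[MAX_EXAMPLE_DEVICES];

static int example_probe(struct platform_device *pdev)
{
	if (pdev->id < 0 || pdev->id >= MAX_EXAMPLE_DEVICES)
		return -EINVAL;
	example_devices[pdev->id] = pdev;	/* remember this instance */
	return 0;
}

static struct platform_device *example_get_plat_device(int id)
{
	return example_devices[id];	/* caller names the instance */
}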
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 0142338b..4bdb836 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -766,9 +766,14 @@
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
- if (param->u.wpa_associate.wpa_ie &&
- copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
- return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie_len) {
+ if (!param->u.wpa_associate.wpa_ie)
+ return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
+ return -EINVAL;
+ if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
+ return -EFAULT;
+ }
if (param->u.wpa_associate.mode == 1)
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
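The rewritten block above validates the user pointer and the length against the destination buffer before copying, and distinguishes a malformed request (-EINVAL) from a faulting copy (-EFAULT). A sketch of the same bounded copy_from_user() pattern, as a hypothetical helper:

/* Sketch of the bounded copy_from_user() pattern. */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int example_copy_ie(u8 *dst, size_t dst_size,
			   const void __user *src, size_t len)
{
	if (!len)
		return 0;		/* nothing requested */
	if (!src || len > dst_size)
		return -EINVAL;		/* malformed request */
	if (copy_from_user(dst, src, len))
		return -EFAULT;		/* the copy itself faulted */
	return 0;
}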
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 7e594449..9eed5b5 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,12 +91,12 @@
If you are unsure about this, say N here.
config USB_SUSPEND
- bool "USB runtime power management (suspend/resume and wakeup)"
+ bool "USB runtime power management (autosuspend) and wakeup"
depends on USB && PM_RUNTIME
help
If you say Y here, you can use driver calls or the sysfs
- "power/level" file to suspend or resume individual USB
- peripherals and to enable or disable autosuspend (see
+ "power/control" file to enable or disable autosuspend for
+ individual USB peripherals (see
Documentation/usb/power-management.txt for more details).
Also, USB "remote wakeup" signaling is supported, whereby some
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5db..1e6ccef 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@
int usb_register_dev(struct usb_interface *intf,
struct usb_class_driver *class_driver)
{
- int retval = -EINVAL;
+ int retval;
int minor_base = class_driver->minor_base;
- int minor = 0;
+ int minor;
char name[20];
char *temp;
@@ -173,12 +173,17 @@
*/
minor_base = 0;
#endif
- intf->minor = -1;
-
- dbg ("looking for a minor, starting at %d", minor_base);
if (class_driver->fops == NULL)
- goto exit;
+ return -EINVAL;
+ if (intf->minor >= 0)
+ return -EADDRINUSE;
+
+ retval = init_usb_class();
+ if (retval)
+ return retval;
+
+ dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
down_write(&minor_rwsem);
for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@
continue;
usb_minors[minor] = class_driver->fops;
-
- retval = 0;
+ intf->minor = minor;
break;
}
up_write(&minor_rwsem);
-
- if (retval)
- goto exit;
-
- retval = init_usb_class();
- if (retval)
- goto exit;
-
- intf->minor = minor;
+ if (intf->minor < 0)
+ return -EXFULL;
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@
"%s", temp);
if (IS_ERR(intf->usb_dev)) {
down_write(&minor_rwsem);
- usb_minors[intf->minor] = NULL;
+ usb_minors[minor] = NULL;
+ intf->minor = -1;
up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
-exit:
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
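The reworked usb_register_dev() validates its arguments and initializes the USB class before searching for a minor, and assigns intf->minor inside the rwsem-protected loop so the claimed slot and the interface always agree. A rough sketch of the claim-a-slot-under-the-writer-lock idea, with hypothetical names and an arbitrary table size:

/* Sketch only; table size and names are hypothetical. */
#include <linux/errno.h>
#include <linux/rwsem.h>

#define MAX_EXAMPLE_SLOTS	16
static void *example_slots[MAX_EXAMPLE_SLOTS];
static DECLARE_RWSEM(example_rwsem);

static int example_claim_slot(void *owner, int base)
{
	int i, found = -1;

	down_write(&example_rwsem);
	for (i = base; i < MAX_EXAMPLE_SLOTS; i++) {
		if (example_slots[i])
			continue;
		example_slots[i] = owner;	/* claim under the lock */
		found = i;
		break;
	}
	up_write(&example_rwsem);

	return found < 0 ? -EXFULL : found;
}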
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 844683e..9f0ce7d 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1802,6 +1802,7 @@
intf->dev.groups = usb_interface_groups;
intf->dev.dma_mask = dev->dev.dma_mask;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
+ intf->minor = -1;
device_initialize(&intf->dev);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
dev->bus->busnum, dev->devpath,
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 58b72d7..a1e8d27 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -119,6 +119,11 @@
ehci->broken_periodic = 1;
ehci_info(ehci, "using broken periodic workaround\n");
}
+ if (pdev->device == 0x0806 || pdev->device == 0x0811
+ || pdev->device == 0x0829) {
+ ehci_info(ehci, "disable lpm for langwell/penwell\n");
+ ehci->has_lpm = 0;
+ }
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 59dc3d3..5ab5bb8 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -322,6 +322,7 @@
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->channel.max_len = 0x7fffffff;
DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index c79a5e3..9e8639d 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -195,15 +195,14 @@
static int musb_test_mode_open(struct inode *inode, struct file *file)
{
- file->private_data = inode->i_private;
-
return single_open(file, musb_test_mode_show, inode->i_private);
}
static ssize_t musb_test_mode_write(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
- struct musb *musb = file->private_data;
+ struct seq_file *s = file->private_data;
+ struct musb *musb = s->private;
u8 test = 0;
char buf[18];
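The debugfs fix above follows from how single_open() wires things up: file->private_data points at the seq_file, and the value passed to single_open() lands in seq_file->private, so a write handler has to take the extra dereference. A minimal sketch of that pairing, with hypothetical example_* names:

/* Sketch of the single_open() pairing. */
#include <linux/fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%p\n", s->private);	/* set by single_open() */
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* inode->i_private becomes seq_file->private */
	return single_open(file, example_show, inode->i_private);
}

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;	/* the seq_file */
	void *priv = s->private;			/* the real data */

	(void)priv;	/* operate on the driver data here */
	return count;
}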
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6fca870..d065e23 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -300,6 +300,11 @@
#ifndef CONFIG_MUSB_PIO_ONLY
if (is_dma_capable() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min_t(size_t, request->length - request->actual,
+ musb_ep->dma->max_len);
use_dma = (request->dma != DMA_ADDR_INVALID);
@@ -307,11 +312,6 @@
#ifdef CONFIG_USB_INVENTRA_DMA
{
- size_t request_size;
-
- /* setup DMA, then program endpoint CSR */
- request_size = min_t(size_t, request->length,
- musb_ep->dma->max_len);
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
@@ -373,8 +373,8 @@
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
0,
- request->dma,
- request->length);
+ request->dma + request->actual,
+ request_size);
if (!use_dma) {
c->channel_release(musb_ep->dma);
musb_ep->dma = NULL;
@@ -386,8 +386,8 @@
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
request->zero,
- request->dma,
- request->length);
+ request->dma + request->actual,
+ request_size);
#endif
}
#endif
@@ -501,26 +501,14 @@
request->zero = 0;
}
- /* ... or if not, then complete it. */
- musb_g_giveback(musb_ep, request, 0);
-
- /*
- * Kickstart next transfer if appropriate;
- * the packet that just completed might not
- * be transmitted for hours or days.
- * REVISIT for double buffering...
- * FIXME revisit for stalls too...
- */
- musb_ep_select(mbase, epnum);
- csr = musb_readw(epio, MUSB_TXCSR);
- if (csr & MUSB_TXCSR_FIFONOTEMPTY)
- return;
-
- request = musb_ep->desc ? next_request(musb_ep) : NULL;
- if (!request) {
- DBG(4, "%s idle now\n",
- musb_ep->end_point.name);
- return;
+ if (request->actual == request->length) {
+ musb_g_giveback(musb_ep, request, 0);
+ request = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!request) {
+ DBG(4, "%s idle now\n",
+ musb_ep->end_point.name);
+ return;
+ }
}
}
@@ -568,11 +556,19 @@
{
const u8 epnum = req->epnum;
struct usb_request *request = &req->request;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
unsigned fifo_count = 0;
- u16 len = musb_ep->packet_sz;
+ u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
+
+ len = musb_ep->packet_sz;
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@
*/
csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
csr |= MUSB_RXCSR_AUTOCLEAR;
+#ifdef USE_MODE1
/* csr |= MUSB_RXCSR_DMAMODE; */
/* this special sequence (enabling and then
@@ -663,10 +659,11 @@
if (request->actual < request->length) {
int transfer_size = 0;
#ifdef USE_MODE1
- transfer_size = min(request->length,
+ transfer_size = min(request->length - request->actual,
channel->max_len);
#else
- transfer_size = len;
+ transfer_size = min(request->length - request->actual,
+ (unsigned)len);
#endif
if (transfer_size <= musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@
u16 csr;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
musb_ep_select(mbase, epnum);
@@ -1081,7 +1084,7 @@
/*
* Context: controller locked, IRQs blocked.
*/
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
DBG(3, "<== %s request %p len %u on hw_ep%d\n",
req->tx ? "TX/IN" : "RX/OUT",
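The txstate/rxstate hunks above program each DMA transfer from the current position (request->dma + request->actual) and cap it at the channel's max_len, then only give the request back once actual reaches length. A small sketch of just that chunking arithmetic, with a hypothetical request type standing in for the gadget request:

/* Sketch of the chunking arithmetic only. */
#include <linux/kernel.h>
#include <linux/types.h>

struct example_req {
	dma_addr_t	dma;		/* mapped start of the buffer */
	size_t		length;		/* total bytes requested */
	size_t		actual;		/* bytes completed so far */
};

static size_t example_next_chunk(const struct example_req *req,
				 size_t max_len, dma_addr_t *addr)
{
	size_t remaining = req->length - req->actual;
	size_t chunk = min_t(size_t, remaining, max_len);

	*addr = req->dma + req->actual;	/* resume where we stopped */
	return chunk;			/* 0 means the request is done */
}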
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index c8b1403..572b1da 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@
extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 59bef8f..6dd03f4 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -261,6 +261,7 @@
ctrlrequest->wIndex & 0x0f;
struct musb_ep *musb_ep;
struct musb_hw_ep *ep;
+ struct musb_request *request;
void __iomem *regs;
int is_in;
u16 csr;
@@ -302,6 +303,14 @@
musb_writew(regs, MUSB_RXCSR, csr);
}
+ /* Maybe start the first request in the queue */
+ request = to_musb_request(
+ next_request(musb_ep));
+ if (!musb_ep->busy && request) {
+ DBG(3, "restarting the request\n");
+ musb_ep_restart(musb, request);
+ }
+
/* select ep0 again */
musb_ep_select(mbase, 0);
} break;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 877d20b..9e65c47 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -660,6 +660,12 @@
qh->segsize = length;
+ /*
+	 * Ensure the data reaches main memory before starting the
+	 * DMA transfer
+ */
+ wmb();
+
if (!dma->channel_program(channel, pkt_size, mode,
urb->transfer_dma + offset, length)) {
dma->channel_release(channel);
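The wmb() added above orders the CPU's writes to the buffer and descriptors ahead of the operation that starts the DMA engine, so the hardware never reads stale data. A minimal sketch of the barrier-before-doorbell ordering, with hypothetical names:

/* Sketch of the barrier-before-doorbell ordering. */
#include <linux/io.h>
#include <linux/types.h>

static void example_kick_dma(u32 __iomem *doorbell, u32 *desc, u32 val)
{
	desc[0] = val;		/* fill the descriptor in main memory */
	wmb();			/* make it visible before the hardware looks */
	writel(1, doorbell);	/* hand the descriptor to the DMA engine */
}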
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 05aaac1..0bc9769 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -347,11 +347,20 @@
}
}
+static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
+{
+ u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+ if (on)
+ pwr &= ~PHY_PWR_PHYPWD;
+ else
+ pwr |= PHY_PWR_PHYPWD;
+
+ WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+}
+
static void twl4030_phy_power(struct twl4030_usb *twl, int on)
{
- u8 pwr;
-
- pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
if (on) {
regulator_enable(twl->usb3v1);
regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@
twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
VUSB_DEDICATED2);
regulator_enable(twl->usb1v5);
- pwr &= ~PHY_PWR_PHYPWD;
- WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+ __twl4030_phy_power(twl, 1);
twl4030_usb_write(twl, PHY_CLK_CTRL,
twl4030_usb_read(twl, PHY_CLK_CTRL) |
(PHY_CLK_CTRL_CLOCKGATING_EN |
PHY_CLK_CTRL_CLK32K_EN));
- } else {
- pwr |= PHY_PWR_PHYPWD;
- WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+ } else {
+ __twl4030_phy_power(twl, 0);
regulator_disable(twl->usb1v5);
regulator_disable(twl->usb1v8);
regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@
twl4030_phy_power(twl, 0);
twl->asleep = 1;
+ dev_dbg(twl->dev, "%s\n", __func__);
+}
+
+static void __twl4030_phy_resume(struct twl4030_usb *twl)
+{
+ twl4030_phy_power(twl, 1);
+ twl4030_i2c_access(twl, 1);
+ twl4030_usb_set_mode(twl, twl->usb_mode);
+ if (twl->usb_mode == T2_USB_MODE_ULPI)
+ twl4030_i2c_access(twl, 0);
}
static void twl4030_phy_resume(struct twl4030_usb *twl)
{
if (!twl->asleep)
return;
-
- twl4030_phy_power(twl, 1);
- twl4030_i2c_access(twl, 1);
- twl4030_usb_set_mode(twl, twl->usb_mode);
- if (twl->usb_mode == T2_USB_MODE_ULPI)
- twl4030_i2c_access(twl, 0);
+ __twl4030_phy_resume(twl);
twl->asleep = 0;
+ dev_dbg(twl->dev, "%s\n", __func__);
}
static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@
twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
- /* put VUSB3V1 LDO in active state */
- twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+	/* Keep VUSB3V1 LDO in sleep state until a VBUS/ID change is detected */
+ /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
/* input to VUSB3V1 LDO is from VBAT, not VBUS */
twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@
return IRQ_HANDLED;
}
+static void twl4030_usb_phy_init(struct twl4030_usb *twl)
+{
+ int status;
+
+ status = twl4030_usb_linkstat(twl);
+ if (status >= 0) {
+ if (status == USB_EVENT_NONE) {
+ __twl4030_phy_power(twl, 0);
+ twl->asleep = 1;
+ } else {
+ __twl4030_phy_resume(twl);
+ twl->asleep = 0;
+ }
+
+ blocking_notifier_call_chain(&twl->otg.notifier, status,
+ twl->otg.gadget);
+ }
+ sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+}
+
static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
{
struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -550,7 +583,6 @@
struct twl4030_usb_data *pdata = pdev->dev.platform_data;
struct twl4030_usb *twl;
int status, err;
- u8 pwr;
if (!pdata) {
dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -569,10 +601,7 @@
twl->otg.set_peripheral = twl4030_set_peripheral;
twl->otg.set_suspend = twl4030_set_suspend;
twl->usb_mode = pdata->usb_mode;
-
- pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
-
- twl->asleep = (pwr & PHY_PWR_PHYPWD);
+ twl->asleep = 1;
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
@@ -610,15 +639,10 @@
return status;
}
- /* The IRQ handler just handles changes from the previous states
- * of the ID and VBUS pins ... in probe() we must initialize that
- * previous state. The easy way: fake an IRQ.
- *
- * REVISIT: a real IRQ might have happened already, if PREEMPT is
- * enabled. Else the IRQ may not yet be configured or enabled,
- * because of scheduling delays.
+	/* Power the PHY down or bring it up according to the
+	 * current link state.
*/
- twl4030_usb_irq(twl->irq, twl);
+ twl4030_usb_phy_init(twl);
dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
return 0;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 30922a7..aa66581 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2024,6 +2024,9 @@
case TIOCGICOUNT:
cnow = mos7720_port->icount;
+
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 1c9b6e9..1a42bc2 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2285,6 +2285,9 @@
case TIOCGICOUNT:
cnow = mos7840_port->icount;
smp_rmb();
+
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
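Both serial-driver hunks clear the on-stack counter structure before filling in selected fields, so padding and any members left unset cannot leak kernel stack contents through the later copy to userspace. A sketch of that clear-before-copy pattern, with a hypothetical structure and helper:

/* Sketch of the clear-before-copy pattern. */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct example_counters {
	int cts, dsr, rng, dcd;
	int reserved[4];	/* would carry stack garbage if left unset */
};

static int example_get_counters(void __user *argp, int cts, int dsr)
{
	struct example_counters out;

	memset(&out, 0, sizeof(out));	/* no uninitialized bytes escape */
	out.cts = cts;
	out.dsr = dsr;
	return copy_to_user(argp, &out, sizeof(out)) ? -EFAULT : 0;
}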
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a..7c80082 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -243,7 +243,7 @@
int r, nlogs = 0;
while (datalen > 0) {
- if (unlikely(headcount >= VHOST_NET_MAX_SG)) {
+ if (unlikely(seg >= VHOST_NET_MAX_SG)) {
r = -ENOBUFS;
goto err;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4b99117..dd3d6f7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,17 +60,8 @@
return 0;
}
-/* Init poll structure */
-void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev)
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
- struct vhost_work *work = &poll->work;
-
- init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
- init_poll_funcptr(&poll->table, vhost_poll_func);
- poll->mask = mask;
- poll->dev = dev;
-
INIT_LIST_HEAD(&work->node);
work->fn = fn;
init_waitqueue_head(&work->done);
@@ -78,6 +69,18 @@
work->queue_seq = work->done_seq = 0;
}
+/* Init poll structure */
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+ unsigned long mask, struct vhost_dev *dev)
+{
+ init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
+ init_poll_funcptr(&poll->table, vhost_poll_func);
+ poll->mask = mask;
+ poll->dev = dev;
+
+ vhost_work_init(&poll->work, fn);
+}
+
/* Start polling a file. We add ourselves to file's wait queue. The caller must
* keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
@@ -95,35 +98,38 @@
remove_wait_queue(poll->wqh, &poll->wait);
}
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- struct vhost_work *work = &poll->work;
unsigned seq;
int left;
int flushing;
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
left = seq - work->done_seq <= 0;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
left;
}));
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
flushing = --work->flushing;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
BUG_ON(flushing < 0);
}
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
{
- struct vhost_dev *dev = poll->dev;
- struct vhost_work *work = &poll->work;
+ vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+ struct vhost_work *work)
+{
unsigned long flags;
spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@
spin_unlock_irqrestore(&dev->work_lock, flags);
}
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+ vhost_work_queue(poll->dev, &poll->work);
+}
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -236,6 +247,29 @@
return dev->mm == current->mm ? 0 : -EPERM;
}
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+ struct vhost_attach_cgroups_struct attach;
+ attach.owner = current;
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_work_queue(dev, &attach.work);
+ vhost_work_flush(dev, &attach.work);
+ return attach.ret;
+}
+
/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
@@ -255,14 +289,16 @@
}
dev->worker = worker;
- err = cgroup_attach_task_current_cg(worker);
+ wake_up_process(worker); /* avoid contributing to loadavg */
+
+ err = vhost_attach_cgroups(dev);
if (err)
goto err_cgroup;
- wake_up_process(worker); /* avoid contributing to loadavg */
return 0;
err_cgroup:
kthread_stop(worker);
+ dev->worker = NULL;
err_worker:
if (dev->mm)
mmput(dev->mm);
@@ -822,11 +858,12 @@
if (r < 0)
return r;
len -= l;
- if (!len)
+ if (!len) {
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
return 0;
+ }
}
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
/* Length written exceeds what we have stored. This is a bug. */
BUG();
return 0;
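The vhost refactor splits vhost_work_init/queue/flush out of the poll helpers so that arbitrary work items can be pushed to the worker thread; vhost_attach_cgroups() uses exactly that to run cgroup_attach_task_all() in the worker's context and then wait for the result on a stack-allocated work item. The sketch below shows the same run-in-worker-and-wait idea, but using the generic workqueue API rather than vhost's private primitives; all names are hypothetical:

/* Sketch: run a function on a worker and collect its result. */
#include <linux/errno.h>
#include <linux/workqueue.h>

struct example_ctx {
	struct work_struct work;
	int ret;
};

static void example_fn(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	ctx->ret = 0;	/* the real work runs in the worker's context */
}

static int example_run_in_worker(struct workqueue_struct *wq)
{
	struct example_ctx ctx = { .ret = -EINVAL };

	INIT_WORK_ONSTACK(&ctx.work, example_fn);
	queue_work(wq, &ctx.work);
	flush_work(&ctx.work);		/* wait until it has executed */
	destroy_work_on_stack(&ctx.work);
	return ctx.ret;
}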
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 84f8423..7ccc967 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3508,7 +3508,7 @@
softback_buf = 0UL;
for (i = 0; i < FB_MAX; i++) {
- int pending;
+ int pending = 0;
mapped = 0;
info = registered_fb[i];
@@ -3516,7 +3516,8 @@
if (info == NULL)
continue;
- pending = cancel_work_sync(&info->queue);
+ if (info->queue.func)
+ pending = cancel_work_sync(&info->queue);
DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
"no"));
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 815f84b..70477c2 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
-
+#include <linux/pci.h>
#include <video/vga.h>
static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@
M_I20, /* 20-Inch iMac */
M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
+ M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
+ M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, 3,1th gen */
+ M_MINI_4_1, /* Mac Mini, 4,1th gen */
M_MB, /* MacBook */
M_MB_2, /* MacBook, 2nd rev. */
M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
M_MBA, /* MacBook Air */
M_MBP, /* MacBook Pro */
M_MBP_2, /* MacBook Pro 2nd gen */
+ M_MBP_2_2, /* MacBook Pro 2,2nd gen */
M_MBP_SR, /* MacBook Pro (Santa Rosa) */
M_MBP_4, /* MacBook Pro, 4th gen */
M_MBP_5_1, /* MacBook Pro, 5,1th gen */
+ M_MBP_5_2, /* MacBook Pro, 5,2th gen */
+ M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
+ M_MBP_6_1, /* MacBook Pro, 6,1th gen */
+ M_MBP_6_2, /* MacBook Pro, 6,2th gen */
+ M_MBP_7_1, /* MacBook Pro, 7,1th gen */
M_UNKNOWN /* placeholder */
};
@@ -64,14 +78,28 @@
[M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
[M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
[M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
+ [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
+ [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
+ [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
[M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
+ [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
+ [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
[M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
+ [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
+ [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
+ [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
[M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
[M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
[M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
+ [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
[M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
[M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
[M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
+ [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
+ [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
+ [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
+ [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
+ [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
[M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
};
@@ -92,7 +120,12 @@
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
{},
};
@@ -116,7 +158,7 @@
{
struct efifb_dmi_info *info = id->driver_data;
if (info->base == 0)
- return -ENODEV;
+ return 0;
printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
"(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@
info->stride);
/* Trust the bootloader over the DMI tables */
- if (screen_info.lfb_base == 0)
+ if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+ struct pci_dev *dev = NULL;
+ int found_bar = 0;
+#endif
screen_info.lfb_base = info->base;
- if (screen_info.lfb_linelength == 0)
- screen_info.lfb_linelength = info->stride;
- if (screen_info.lfb_width == 0)
- screen_info.lfb_width = info->width;
- if (screen_info.lfb_height == 0)
- screen_info.lfb_height = info->height;
- if (screen_info.orig_video_isVGA == 0)
- screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
- return 0;
+#if defined(CONFIG_PCI)
+ /* make sure that the address in the table is actually on a
+ * VGA device's PCI BAR */
+
+ for_each_pci_dev(dev) {
+ int i;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+
+ start = pci_resource_start(dev, i);
+ if (start == 0)
+ break;
+ end = pci_resource_end(dev, i);
+ if (screen_info.lfb_base >= start &&
+ screen_info.lfb_base < end) {
+ found_bar = 1;
+ }
+ }
+ }
+ if (!found_bar)
+ screen_info.lfb_base = 0;
+#endif
+ }
+ if (screen_info.lfb_base) {
+ if (screen_info.lfb_linelength == 0)
+ screen_info.lfb_linelength = info->stride;
+ if (screen_info.lfb_width == 0)
+ screen_info.lfb_width = info->width;
+ if (screen_info.lfb_height == 0)
+ screen_info.lfb_height = info->height;
+ if (screen_info.orig_video_isVGA == 0)
+ screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+ } else {
+ screen_info.lfb_linelength = 0;
+ screen_info.lfb_width = 0;
+ screen_info.lfb_height = 0;
+ screen_info.orig_video_isVGA = 0;
+ return 0;
+ }
+ return 1;
}
static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index c91a7f7..a31a77f 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -298,8 +298,8 @@
* Set bit to enable graphics DMA.
*/
x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
- x |= fbi->active ? 0x00000100 : 0;
- fbi->active = 0;
+ x &= ~CFG_GRA_ENA_MASK;
+ x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
/*
* If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@
.fb_imageblit = cfb_imageblit,
};
-static int __init pxa168fb_init_mode(struct fb_info *info,
+static int __devinit pxa168fb_init_mode(struct fb_info *info,
struct pxa168fb_mach_info *mi)
{
struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@
return ret;
}
-static int __init pxa168fb_probe(struct platform_device *pdev)
+static int __devinit pxa168fb_probe(struct platform_device *pdev)
{
struct pxa168fb_mach_info *mi;
struct fb_info *info = 0;
@@ -792,7 +792,7 @@
.probe = pxa168fb_probe,
};
-static int __devinit pxa168fb_init(void)
+static int __init pxa168fb_init(void)
{
return platform_driver_register(&pxa168fb_driver);
}
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 559bf17..b52f8e4 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1701,6 +1701,9 @@
break;
case FBIOGET_VBLANK:
+
+ memset(&sisvbblank, 0, sizeof(struct fb_vblank));
+
sisvbblank.count = 0;
sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c
index da03c07..4d553d0 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -25,6 +25,8 @@
{
struct viafb_ioctl_info viainfo;
+ memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
+
viainfo.viafb_id = VIAID;
viainfo.vendor_id = PCI_VIA_VENDOR_ID;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b036677..24efd8e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -213,11 +213,11 @@
here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
config PNX4008_WATCHDOG
- tristate "PNX4008 Watchdog"
- depends on ARCH_PNX4008
+ tristate "PNX4008 and LPC32XX Watchdog"
+ depends on ARCH_PNX4008 || ARCH_LPC32XX
help
Say Y here if to include support for the watchdog timer
- in the PNX4008 processor.
+ in the PNX4008 or LPC32XX processor.
This driver can be built as a module by choosing M. The module
will be called pnx4008_wdt.
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 88c83aa..f31493e 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -305,7 +305,7 @@
if (ret) {
printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
ident.identity, ret);
- return ret;
+ goto out;
}
ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@
printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
ident.identity,
timeout / 1000000, (timeout / 100000) % 10);
- } else
- free_irq(1, (void *)user_dog);
+ return 0;
+ }
+ free_irq(1, (void *)user_dog);
+out:
+ unregister_reboot_notifier(&sbwdog_notifier);
+
return ret;
}
static void __exit sbwdog_exit(void)
{
misc_deregister(&sbwdog_miscdev);
+ free_irq(1, (void *)user_dog);
+ unregister_reboot_notifier(&sbwdog_notifier);
}
module_init(sbwdog_init);
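The sb_wdog fix makes the init path unwind in reverse order when misc_register() fails (free the IRQ, then unregister the reboot notifier) and makes the exit path release the same resources. A sketch of this goto-based unwinding pattern, with hypothetical example_* objects:

/* Sketch of goto-based unwinding in reverse acquisition order. */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/reboot.h>

static int example_reboot(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call	= example_reboot,
};

static struct miscdevice example_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "example",
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __init example_init(void)
{
	int ret;

	ret = register_reboot_notifier(&example_notifier);
	if (ret)
		return ret;

	ret = request_irq(1, example_isr, 0, "example", NULL);
	if (ret)
		goto out_notifier;

	ret = misc_register(&example_miscdev);
	if (ret)
		goto out_irq;

	return 0;

out_irq:
	free_irq(1, NULL);
out_notifier:
	unregister_reboot_notifier(&example_notifier);
	return ret;
}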
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 458c499..18cdeb4 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -449,6 +449,9 @@
wdt->pdev = pdev;
mutex_init(&wdt->lock);
+ /* make sure that the watchdog is disabled */
+ ts72xx_wdt_stop(wdt);
+
error = misc_register(&ts72xx_wdt_miscdev);
if (error) {
dev_err(&pdev->dev, "failed to register miscdev\n");
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 16c8a2a..899f168 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -292,9 +292,11 @@
fid = filp->private_data;
P9_DPRINTK(P9_DEBUG_VFS,
- "inode: %p filp: %p fid: %d\n", inode, filp, fid->fid);
+ "v9fs_dir_release: inode: %p filp: %p fid: %d\n",
+ inode, filp, fid ? fid->fid : -1);
filemap_write_and_wait(inode->i_mapping);
- p9_client_clunk(fid);
+ if (fid)
+ p9_client_clunk(fid);
return 0;
}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index c7c23ea..9e670d5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -730,7 +730,10 @@
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
goto error;
}
- dentry->d_op = &v9fs_cached_dentry_operations;
+ if (v9ses->cache)
+ dentry->d_op = &v9fs_cached_dentry_operations;
+ else
+ dentry->d_op = &v9fs_dentry_operations;
d_instantiate(dentry, inode);
err = v9fs_fid_add(dentry, fid);
if (err < 0)
@@ -1128,6 +1131,7 @@
v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
generic_fillattr(dentry->d_inode, stat);
+ p9stat_free(st);
kfree(st);
return 0;
}
@@ -1489,6 +1493,7 @@
retval = strnlen(buffer, buflen);
done:
+ p9stat_free(st);
kfree(st);
return retval;
}
@@ -1942,7 +1947,7 @@
.unlink = v9fs_vfs_unlink,
.mkdir = v9fs_vfs_mkdir,
.rmdir = v9fs_vfs_rmdir,
- .mknod = v9fs_vfs_mknod_dotl,
+ .mknod = v9fs_vfs_mknod,
.rename = v9fs_vfs_rename,
.getattr = v9fs_vfs_getattr,
.setattr = v9fs_vfs_setattr,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index f931107..1d12ba0 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -122,6 +122,10 @@
fid = v9fs_session_init(v9ses, dev_name, data);
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
+ /*
+		 * we need to call session_close to tear down some
+		 * of the data structures set up by session_init
+ */
goto close_session;
}
@@ -144,7 +148,6 @@
retval = -ENOMEM;
goto release_sb;
}
-
sb->s_root = root;
if (v9fs_proto_dotl(v9ses)) {
@@ -152,7 +155,7 @@
st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
if (IS_ERR(st)) {
retval = PTR_ERR(st);
- goto clunk_fid;
+ goto release_sb;
}
v9fs_stat2inode_dotl(st, root->d_inode);
@@ -162,7 +165,7 @@
st = p9_client_stat(fid);
if (IS_ERR(st)) {
retval = PTR_ERR(st);
- goto clunk_fid;
+ goto release_sb;
}
root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
@@ -174,19 +177,24 @@
v9fs_fid_add(root, fid);
-P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+ P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
simple_set_mnt(mnt, sb);
return 0;
clunk_fid:
p9_client_clunk(fid);
-
close_session:
v9fs_session_close(v9ses);
kfree(v9ses);
return retval;
-
release_sb:
+ /*
+	 * we will do the session_close and root dentry release
+	 * in the call below. But we need to clunk the fid here, because we
+	 * haven't attached the fid to the dentry, so it won't get clunked
+	 * automatically.
+ */
+ p9_client_clunk(fid);
deactivate_locked_super(sb);
return retval;
}
diff --git a/fs/aio.c b/fs/aio.c
index 3006b5b..250b0a7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -712,8 +712,16 @@
*/
ret = retry(iocb);
- if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
+ if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
+ /*
+		 * There's no easy way to restart the syscall since other AIOs
+		 * may already be running. Just fail this IO with EINTR.
+ */
+ if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
+ ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
+ ret = -EINTR;
aio_complete(iocb, ret, 0);
+ }
out:
spin_lock_irq(&ctx->ctx_lock);
@@ -1659,6 +1667,9 @@
if (unlikely(nr < 0))
return -EINVAL;
+ if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
+ nr = LONG_MAX/sizeof(*iocbpp);
+
if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
return -EFAULT;
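Clamping nr to LONG_MAX/sizeof(*iocbpp) before the access_ok() call keeps the nr * sizeof(*iocbpp) multiplication from overflowing on a hostile value. A sketch of the clamp in isolation, as a hypothetical helper that uses the same old-style three-argument access_ok() as the surrounding code:

/* Sketch of the overflow clamp in isolation. */
#include <linux/kernel.h>
#include <linux/uaccess.h>

static long example_check_array(const void __user *uptr, long nr, size_t elem)
{
	if (nr < 0 || !elem)
		return -EINVAL;
	if (nr > LONG_MAX / elem)
		nr = LONG_MAX / elem;	/* nr * elem can no longer overflow */
	if (!access_ok(VERIFY_READ, uptr, nr * elem))
		return -EFAULT;
	return nr;			/* possibly clamped element count */
}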
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 535e763..6884e19 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -800,7 +800,7 @@
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86) || defined(CONFIG_ARM)
load_bias = 0;
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index a7528b9..fd0cc0b 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -724,7 +724,7 @@
{
int err = register_filesystem(&bm_fs_type);
if (!err) {
- err = register_binfmt(&misc_format);
+ err = insert_binfmt(&misc_format);
if (err)
unregister_filesystem(&bm_fs_type);
}
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 612a5c3..4d0ff5e 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -413,10 +413,10 @@
/* Allocate kernel buffer for protection data */
len = sectors * blk_integrity_tuple_size(bi);
- buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp);
+ buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
if (unlikely(buf == NULL)) {
printk(KERN_ERR "could not allocate integrity buffer\n");
- return -EIO;
+ return -ENOMEM;
}
end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index bc87b9c..0fcd264 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,6 +3,7 @@
depends on INET && EXPERIMENTAL
select LIBCRC32C
select CRYPTO_AES
+ select CRYPTO
help
Choose Y or M here to include support for mounting the
experimental Ceph distributed file system. Ceph is an extremely
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4cfce1e..efbc604 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -411,8 +411,8 @@
if (i_size < page_off + len)
len = i_size - page_off;
- dout("writepage %p page %p index %lu on %llu~%u\n",
- inode, page, page->index, page_off, len);
+ dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
+ inode, page, page->index, page_off, len, snapc);
writeback_stat = atomic_long_inc_return(&client->writeback_count);
if (writeback_stat >
@@ -766,7 +766,8 @@
/* ok */
if (locked_pages == 0) {
/* prepare async write request */
- offset = page->index << PAGE_CACHE_SHIFT;
+ offset = (unsigned long long)page->index
+ << PAGE_CACHE_SHIFT;
len = wsize;
req = ceph_osdc_new_request(&client->osdc,
&ci->i_layout,
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a2069b6..73c1530 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -814,7 +814,7 @@
used |= CEPH_CAP_PIN;
if (ci->i_rd_ref)
used |= CEPH_CAP_FILE_RD;
- if (ci->i_rdcache_ref || ci->i_rdcache_gen)
+ if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
used |= CEPH_CAP_FILE_CACHE;
if (ci->i_wr_ref)
used |= CEPH_CAP_FILE_WR;
@@ -1195,10 +1195,14 @@
* asynchronously back to the MDS once sync writes complete and dirty
* data is written out.
*
+ * Unless @again is true, skip cap_snaps that were already sent to
+ * the MDS (i.e., during this session).
+ *
* Called under i_lock. Takes s_mutex as needed.
*/
void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession)
+ struct ceph_mds_session **psession,
+ int again)
__releases(ci->vfs_inode->i_lock)
__acquires(ci->vfs_inode->i_lock)
{
@@ -1227,7 +1231,7 @@
* pages to be written out.
*/
if (capsnap->dirty_pages || capsnap->writing)
- continue;
+ break;
/*
* if cap writeback already occurred, we should have dropped
@@ -1240,6 +1244,13 @@
dout("no auth cap (migrating?), doing nothing\n");
goto out;
}
+
+ /* only flush each capsnap once */
+ if (!again && !list_empty(&capsnap->flushing_item)) {
+ dout("already flushed %p, skipping\n", capsnap);
+ continue;
+ }
+
mds = ci->i_auth_cap->session->s_mds;
mseq = ci->i_auth_cap->mseq;
@@ -1276,8 +1287,8 @@
&session->s_cap_snaps_flushing);
spin_unlock(&inode->i_lock);
- dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
- inode, capsnap, next_follows, capsnap->size);
+ dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
+ inode, capsnap, capsnap->follows, capsnap->flush_tid);
send_cap_msg(session, ceph_vino(inode).ino, 0,
CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
@@ -1314,7 +1325,7 @@
struct inode *inode = &ci->vfs_inode;
spin_lock(&inode->i_lock);
- __ceph_flush_snaps(ci, NULL);
+ __ceph_flush_snaps(ci, NULL, 0);
spin_unlock(&inode->i_lock);
}
@@ -1477,7 +1488,7 @@
/* flush snaps first time around only */
if (!list_empty(&ci->i_cap_snaps))
- __ceph_flush_snaps(ci, &session);
+ __ceph_flush_snaps(ci, &session, 0);
goto retry_locked;
retry:
spin_lock(&inode->i_lock);
@@ -1894,7 +1905,7 @@
if (cap && cap->session == session) {
dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
cap, capsnap);
- __ceph_flush_snaps(ci, &session);
+ __ceph_flush_snaps(ci, &session, 1);
} else {
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6e4f43f..a1986eb 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1021,11 +1021,15 @@
static void ceph_dentry_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
- struct inode *parent_inode = dentry->d_parent->d_inode;
- u64 snapid = ceph_snap(parent_inode);
+ struct inode *parent_inode = NULL;
+ u64 snapid = CEPH_NOSNAP;
+ if (!IS_ROOT(dentry)) {
+ parent_inode = dentry->d_parent->d_inode;
+ if (parent_inode)
+ snapid = ceph_snap(parent_inode);
+ }
dout("dentry_release %p parent %p\n", dentry, parent_inode);
-
if (parent_inode && snapid != CEPH_SNAPDIR) {
struct ceph_inode_info *ci = ceph_inode(parent_inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e7cca414..62377ec 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -845,7 +845,7 @@
* the caller) if we fail.
*/
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
- bool *prehash)
+ bool *prehash, bool set_offset)
{
struct dentry *realdn;
@@ -877,7 +877,8 @@
}
if ((!prehash || *prehash) && d_unhashed(dn))
d_rehash(dn);
- ceph_set_dentry_offset(dn);
+ if (set_offset)
+ ceph_set_dentry_offset(dn);
out:
return dn;
}
@@ -1062,7 +1063,7 @@
d_delete(dn);
goto done;
}
- dn = splice_dentry(dn, in, &have_lease);
+ dn = splice_dentry(dn, in, &have_lease, true);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
@@ -1105,7 +1106,7 @@
goto done;
}
dout(" linking snapped dir %p to dn %p\n", in, dn);
- dn = splice_dentry(dn, in, NULL);
+ dn = splice_dentry(dn, in, NULL, true);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
@@ -1237,7 +1238,7 @@
err = PTR_ERR(in);
goto out;
}
- dn = splice_dentry(dn, in, NULL);
+ dn = splice_dentry(dn, in, NULL, false);
if (IS_ERR(dn))
dn = NULL;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f091b13..fad95f8 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2374,6 +2374,8 @@
num_fcntl_locks,
num_flock_locks);
unlock_kernel();
+ } else {
+ err = ceph_pagelist_append(pagelist, &rec, reclen);
}
out_free:
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
index b6859f4..46a368b 100644
--- a/fs/ceph/pagelist.c
+++ b/fs/ceph/pagelist.c
@@ -5,10 +5,18 @@
#include "pagelist.h"
+static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
+{
+ struct page *page = list_entry(pl->head.prev, struct page,
+ lru);
+ kunmap(page);
+}
+
int ceph_pagelist_release(struct ceph_pagelist *pl)
{
if (pl->mapped_tail)
- kunmap(pl->mapped_tail);
+ ceph_pagelist_unmap_tail(pl);
+
while (!list_empty(&pl->head)) {
struct page *page = list_first_entry(&pl->head, struct page,
lru);
@@ -26,7 +34,7 @@
pl->room += PAGE_SIZE;
list_add_tail(&page->lru, &pl->head);
if (pl->mapped_tail)
- kunmap(pl->mapped_tail);
+ ceph_pagelist_unmap_tail(pl);
pl->mapped_tail = kmap(page);
return 0;
}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4868b9d..190b6c4 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -119,6 +119,7 @@
INIT_LIST_HEAD(&realm->children);
INIT_LIST_HEAD(&realm->child_item);
INIT_LIST_HEAD(&realm->empty_item);
+ INIT_LIST_HEAD(&realm->dirty_item);
INIT_LIST_HEAD(&realm->inodes_with_caps);
spin_lock_init(&realm->inodes_with_caps_lock);
__insert_snap_realm(&mdsc->snap_realms, realm);
@@ -467,7 +468,7 @@
INIT_LIST_HEAD(&capsnap->ci_item);
INIT_LIST_HEAD(&capsnap->flushing_item);
- capsnap->follows = snapc->seq - 1;
+ capsnap->follows = snapc->seq;
capsnap->issued = __ceph_caps_issued(ci, NULL);
capsnap->dirty = dirty;
@@ -604,6 +605,7 @@
struct ceph_snap_realm *realm;
int invalidate = 0;
int err = -ENOMEM;
+ LIST_HEAD(dirty_realms);
dout("update_snap_trace deletion=%d\n", deletion);
more:
@@ -626,24 +628,6 @@
}
}
- if (le64_to_cpu(ri->seq) > realm->seq) {
- dout("update_snap_trace updating %llx %p %lld -> %lld\n",
- realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
- /*
- * if the realm seq has changed, queue a cap_snap for every
- * inode with open caps. we do this _before_ we update
- * the realm info so that we prepare for writeback under the
- * _previous_ snap context.
- *
- * ...unless it's a snap deletion!
- */
- if (!deletion)
- queue_realm_cap_snaps(realm);
- } else {
- dout("update_snap_trace %llx %p seq %lld unchanged\n",
- realm->ino, realm, realm->seq);
- }
-
/* ensure the parent is correct */
err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
if (err < 0)
@@ -651,6 +635,8 @@
invalidate += err;
if (le64_to_cpu(ri->seq) > realm->seq) {
+ dout("update_snap_trace updating %llx %p %lld -> %lld\n",
+ realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
/* update realm parameters, snap lists */
realm->seq = le64_to_cpu(ri->seq);
realm->created = le64_to_cpu(ri->created);
@@ -668,9 +654,17 @@
if (err < 0)
goto fail;
+ /* queue realm for cap_snap creation */
+ list_add(&realm->dirty_item, &dirty_realms);
+
invalidate = 1;
} else if (!realm->cached_context) {
+ dout("update_snap_trace %llx %p seq %lld new\n",
+ realm->ino, realm, realm->seq);
invalidate = 1;
+ } else {
+ dout("update_snap_trace %llx %p seq %lld unchanged\n",
+ realm->ino, realm, realm->seq);
}
dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
@@ -683,6 +677,14 @@
if (invalidate)
rebuild_snap_realms(realm);
+ /*
+ * queue cap snaps _after_ we've built the new snap contexts,
+ * so that i_head_snapc can be set appropriately.
+ */
+ list_for_each_entry(realm, &dirty_realms, dirty_item) {
+ queue_realm_cap_snaps(realm);
+ }
+
__cleanup_empty_realms(mdsc);
return 0;
@@ -715,7 +717,7 @@
igrab(inode);
spin_unlock(&mdsc->snap_flush_lock);
spin_lock(&inode->i_lock);
- __ceph_flush_snaps(ci, &session);
+ __ceph_flush_snaps(ci, &session, 0);
spin_unlock(&inode->i_lock);
iput(inode);
spin_lock(&mdsc->snap_flush_lock);
@@ -816,6 +818,7 @@
};
struct inode *inode = ceph_find_inode(sb, vino);
struct ceph_inode_info *ci;
+ struct ceph_snap_realm *oldrealm;
if (!inode)
continue;
@@ -841,18 +844,19 @@
dout(" will move %p to split realm %llx %p\n",
inode, realm->ino, realm);
/*
- * Remove the inode from the realm's inode
- * list, but don't add it to the new realm
- * yet. We don't want the cap_snap to be
- * queued (again) by ceph_update_snap_trace()
- * below. Queue it _now_, under the old context.
+ * Move the inode to the new realm
*/
spin_lock(&realm->inodes_with_caps_lock);
list_del_init(&ci->i_snap_realm_item);
+ list_add(&ci->i_snap_realm_item,
+ &realm->inodes_with_caps);
+ oldrealm = ci->i_snap_realm;
+ ci->i_snap_realm = realm;
spin_unlock(&realm->inodes_with_caps_lock);
spin_unlock(&inode->i_lock);
- ceph_queue_cap_snap(ci);
+ ceph_get_snap_realm(mdsc, realm);
+ ceph_put_snap_realm(mdsc, oldrealm);
iput(inode);
continue;
@@ -880,43 +884,9 @@
ceph_update_snap_trace(mdsc, p, e,
op == CEPH_SNAP_OP_DESTROY);
- if (op == CEPH_SNAP_OP_SPLIT) {
- /*
- * ok, _now_ add the inodes into the new realm.
- */
- for (i = 0; i < num_split_inos; i++) {
- struct ceph_vino vino = {
- .ino = le64_to_cpu(split_inos[i]),
- .snap = CEPH_NOSNAP,
- };
- struct inode *inode = ceph_find_inode(sb, vino);
- struct ceph_inode_info *ci;
-
- if (!inode)
- continue;
- ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
- if (list_empty(&ci->i_snap_realm_item)) {
- struct ceph_snap_realm *oldrealm =
- ci->i_snap_realm;
-
- dout(" moving %p to split realm %llx %p\n",
- inode, realm->ino, realm);
- spin_lock(&realm->inodes_with_caps_lock);
- list_add(&ci->i_snap_realm_item,
- &realm->inodes_with_caps);
- ci->i_snap_realm = realm;
- spin_unlock(&realm->inodes_with_caps_lock);
- ceph_get_snap_realm(mdsc, realm);
- ceph_put_snap_realm(mdsc, oldrealm);
- }
- spin_unlock(&inode->i_lock);
- iput(inode);
- }
-
+ if (op == CEPH_SNAP_OP_SPLIT)
/* we took a reference when we created the realm, above */
ceph_put_snap_realm(mdsc, realm);
- }
__cleanup_empty_realms(mdsc);
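update_snap_trace() now collects realms whose sequence advanced on a local dirty_realms list and queues their cap_snaps only after every new snap context has been rebuilt, so i_head_snapc is set against the final contexts. A minimal sketch of this collect-first, process-later list pattern (hypothetical types, not Ceph code):

/* Sketch of the two-phase list pattern. */
#include <linux/list.h>

struct example_realm {
	struct list_head dirty_item;
	int seq;
};

static void example_rebuild_context(struct example_realm *realm)
{
	/* phase 1 work: rebuild per-realm state */
}

static void example_queue_cap_snaps(struct example_realm *realm)
{
	/* phase 2 work: act once every realm's state is final */
}

static void example_update(struct example_realm *realms, int n)
{
	LIST_HEAD(dirty);		/* realms touched by this update */
	struct example_realm *r;
	int i;

	for (i = 0; i < n; i++) {
		example_rebuild_context(&realms[i]);
		list_add(&realms[i].dirty_item, &dirty);	/* remember it */
	}

	list_for_each_entry(r, &dirty, dirty_item)
		example_queue_cap_snaps(r);	/* only after all contexts exist */
}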
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index c33897a..b87638e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -690,6 +690,8 @@
struct list_head empty_item; /* if i have ref==0 */
+ struct list_head dirty_item; /* if realm needs new context */
+
/* the current set of snaps for this realm */
struct ceph_snap_context *cached_context;
@@ -826,7 +828,8 @@
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession);
+ struct ceph_mds_session **psession,
+ int again);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session);
extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index f80a4f2..143d393 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -40,7 +40,9 @@
#endif
/* permit direct mmap, for read, write or exec */
BDI_CAP_MAP_DIRECT |
- BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
+ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
+ /* no writeback happens */
+ BDI_CAP_NO_ACCT_AND_WRITEBACK),
};
static struct kobj_map *cdev_map;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 0da1deb..917b7d4 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,8 +2,6 @@
tristate "CIFS support (advanced network filesystem, SMBFS successor)"
depends on INET
select NLS
- select CRYPTO_MD5
- select CRYPTO_ARC4
help
This is the client VFS module for the Common Internet File System
(CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 21f0fbd..cfd1ce3 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -597,13 +597,13 @@
if (compare_oid(oid, oidlen, MSKRB5_OID,
MSKRB5_OID_LEN))
server->sec_mskerberos = true;
- if (compare_oid(oid, oidlen, KRB5U2U_OID,
+ else if (compare_oid(oid, oidlen, KRB5U2U_OID,
KRB5U2U_OID_LEN))
server->sec_kerberosu2u = true;
- if (compare_oid(oid, oidlen, KRB5_OID,
+ else if (compare_oid(oid, oidlen, KRB5_OID,
KRB5_OID_LEN))
server->sec_kerberos = true;
- if (compare_oid(oid, oidlen, NTLMSSP_OID,
+ else if (compare_oid(oid, oidlen, NTLMSSP_OID,
NTLMSSP_OID_LEN))
server->sec_ntlmssp = true;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 709f229..35042d8 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -27,7 +27,6 @@
#include "md5.h"
#include "cifs_unicode.h"
#include "cifsproto.h"
-#include "ntlmssp.h"
#include <linux/ctype.h>
#include <linux/random.h>
@@ -43,43 +42,21 @@
unsigned char *p24);
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
- struct TCP_Server_Info *server, char *signature)
+ const struct mac_key *key, char *signature)
{
- int rc;
+ struct MD5Context context;
- if (cifs_pdu == NULL || server == NULL || signature == NULL)
+ if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
- if (!server->ntlmssp.sdescmd5) {
- cERROR(1,
- "cifs_calculate_signature: can't generate signature\n");
- return -1;
- }
+ cifs_MD5_init(&context);
+ cifs_MD5_update(&context, (char *)&key->data, key->len);
+ cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
- rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
- if (rc) {
- cERROR(1, "cifs_calculate_signature: oould not init md5\n");
- return rc;
- }
-
- if (server->secType == RawNTLMSSP)
- crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
- server->session_key.data.ntlmv2.key,
- CIFS_NTLMV2_SESSKEY_SIZE);
- else
- crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
- (char *)&server->session_key.data,
- server->session_key.len);
-
- crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
- cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
-
- rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
-
- return rc;
+ cifs_MD5_final(signature, &context);
+ return 0;
}
-
int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
@@ -101,7 +78,8 @@
server->sequence_number++;
spin_unlock(&GlobalMid_Lock);
- rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
+ rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
+ smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
@@ -111,39 +89,21 @@
}
static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
- struct TCP_Server_Info *server, char *signature)
+ const struct mac_key *key, char *signature)
{
+ struct MD5Context context;
int i;
- int rc;
- if (iov == NULL || server == NULL || signature == NULL)
+ if ((iov == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
- if (!server->ntlmssp.sdescmd5) {
- cERROR(1, "cifs_calc_signature2: can't generate signature\n");
- return -1;
- }
-
- rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
- if (rc) {
- cERROR(1, "cifs_calc_signature2: oould not init md5\n");
- return rc;
- }
-
- if (server->secType == RawNTLMSSP)
- crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
- server->session_key.data.ntlmv2.key,
- CIFS_NTLMV2_SESSKEY_SIZE);
- else
- crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
- (char *)&server->session_key.data,
- server->session_key.len);
-
+ cifs_MD5_init(&context);
+ cifs_MD5_update(&context, (char *)&key->data, key->len);
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
- cERROR(1, "cifs_calc_signature2: null iovec entry");
+ cERROR(1, "null iovec entry");
return -EIO;
}
/* The first entry includes a length field (which does not get
@@ -151,18 +111,18 @@
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
- crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
- iov[i].iov_base + 4, iov[i].iov_len - 4);
+ cifs_MD5_update(&context, iov[0].iov_base+4,
+ iov[0].iov_len-4);
} else
- crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
- iov[i].iov_base, iov[i].iov_len);
+ cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
}
- rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
+ cifs_MD5_final(signature, &context);
- return rc;
+ return 0;
}
+
int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
@@ -185,7 +145,8 @@
server->sequence_number++;
spin_unlock(&GlobalMid_Lock);
- rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
+ rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
+ smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
@@ -195,14 +156,14 @@
}
int cifs_verify_signature(struct smb_hdr *cifs_pdu,
- struct TCP_Server_Info *server,
+ const struct mac_key *mac_key,
__u32 expected_sequence_number)
{
- int rc;
+ unsigned int rc;
char server_response_sig[8];
char what_we_think_sig_should_be[20];
- if (cifs_pdu == NULL || server == NULL)
+ if ((cifs_pdu == NULL) || (mac_key == NULL))
return -EINVAL;
if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -231,7 +192,7 @@
cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
- rc = cifs_calculate_signature(cifs_pdu, server,
+ rc = cifs_calculate_signature(cifs_pdu, mac_key,
what_we_think_sig_should_be);
if (rc)
@@ -248,7 +209,7 @@
}
/* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_session_key(struct session_key *key, const char *rn,
+int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
const char *password)
{
char temp_key[16];
@@ -306,52 +267,38 @@
{
int rc = 0;
int len;
- char nt_hash[CIFS_NTHASH_SIZE];
+ char nt_hash[16];
+ struct HMACMD5Context *pctxt;
wchar_t *user;
wchar_t *domain;
- wchar_t *server;
- if (!ses->server->ntlmssp.sdeschmacmd5) {
- cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
- return -1;
- }
+ pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
+
+ if (pctxt == NULL)
+ return -ENOMEM;
/* calculate md4 hash of password */
E_md4hash(ses->password, nt_hash);
- crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash,
- CIFS_NTHASH_SIZE);
-
- rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
- if (rc) {
- cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
- return rc;
- }
+ /* convert Domainname to unicode and uppercase */
+ hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
/* convert ses->userName to unicode and uppercase */
len = strlen(ses->userName);
user = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (user == NULL) {
- cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
- rc = -ENOMEM;
+ if (user == NULL)
goto calc_exit_2;
- }
len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
UniStrupr(user);
-
- crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
- (char *)user, 2 * len);
+ hmac_md5_update((char *)user, 2*len, pctxt);
/* convert ses->domainName to unicode and uppercase */
if (ses->domainName) {
len = strlen(ses->domainName);
domain = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (domain == NULL) {
- cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
- rc = -ENOMEM;
+ if (domain == NULL)
goto calc_exit_1;
- }
len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
nls_cp);
/* the following line was removed since it didn't work well
@@ -359,292 +306,65 @@
Maybe converting the domain name earlier makes sense */
/* UniStrupr(domain); */
- crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
- (char *)domain, 2 * len);
+ hmac_md5_update((char *)domain, 2*len, pctxt);
kfree(domain);
- } else if (ses->serverName) {
- len = strlen(ses->serverName);
-
- server = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (server == NULL) {
- cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
- rc = -ENOMEM;
- goto calc_exit_1;
- }
- len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
- nls_cp);
- /* the following line was removed since it didn't work well
- with lower cased domain name that passed as an option.
- Maybe converting the domain name earlier makes sense */
- /* UniStrupr(domain); */
-
- crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
- (char *)server, 2 * len);
-
- kfree(server);
}
-
- rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
- ses->server->ntlmv2_hash);
-
calc_exit_1:
kfree(user);
calc_exit_2:
/* BB FIXME what about bytes 24 through 40 of the signing key?
compare with the NTLM example */
+ hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
+ kfree(pctxt);
return rc;
}
-static int
-find_domain_name(struct cifsSesInfo *ses)
-{
- int rc = 0;
- unsigned int attrsize;
- unsigned int type;
- unsigned char *blobptr;
- struct ntlmssp2_name *attrptr;
-
- if (ses->server->tiblob) {
- blobptr = ses->server->tiblob;
- attrptr = (struct ntlmssp2_name *) blobptr;
-
- while ((type = attrptr->type) != 0) {
- blobptr += 2; /* advance attr type */
- attrsize = attrptr->length;
- blobptr += 2; /* advance attr size */
- if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
- if (!ses->domainName) {
- ses->domainName =
- kmalloc(attrptr->length + 1,
- GFP_KERNEL);
- if (!ses->domainName)
- return -ENOMEM;
- cifs_from_ucs2(ses->domainName,
- (__le16 *)blobptr,
- attrptr->length,
- attrptr->length,
- load_nls_default(), false);
- }
- }
- blobptr += attrsize; /* advance attr value */
- attrptr = (struct ntlmssp2_name *) blobptr;
- }
- } else {
- ses->server->tilen = 2 * sizeof(struct ntlmssp2_name);
- ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL);
- if (!ses->server->tiblob) {
- ses->server->tilen = 0;
- cERROR(1, "Challenge target info allocation failure");
- return -ENOMEM;
- }
- memset(ses->server->tiblob, 0x0, ses->server->tilen);
- attrptr = (struct ntlmssp2_name *) ses->server->tiblob;
- attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
- }
-
- return rc;
-}
-
-static int
-CalcNTLMv2_response(const struct TCP_Server_Info *server,
- char *v2_session_response)
-{
- int rc;
-
- if (!server->ntlmssp.sdeschmacmd5) {
- cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
- return -1;
- }
-
- crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash,
- CIFS_HMAC_MD5_HASH_SIZE);
-
- rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash);
- if (rc) {
- cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
- return rc;
- }
-
- memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
- server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE);
- crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
- v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
- sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE);
-
- if (server->tilen)
- crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
- server->tiblob, server->tilen);
-
- rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash,
- v2_session_response);
-
- return rc;
-}
-
-int
-setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
const struct nls_table *nls_cp)
{
- int rc = 0;
+ int rc;
struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
+ struct HMACMD5Context context;
buf->blob_signature = cpu_to_le32(0x00000101);
buf->reserved = 0;
buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
buf->reserved2 = 0;
-
- if (!ses->domainName) {
- rc = find_domain_name(ses);
- if (rc) {
- cERROR(1, "could not get domain/server name rc %d", rc);
- return rc;
- }
- }
+ buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
+ buf->names[0].length = 0;
+ buf->names[1].type = 0;
+ buf->names[1].length = 0;
/* calculate buf->ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, nls_cp);
- if (rc) {
+ if (rc)
cERROR(1, "could not get v2 hash rc %d", rc);
- return rc;
- }
- rc = CalcNTLMv2_response(ses->server, resp_buf);
- if (rc) {
- cERROR(1, "could not get v2 hash rc %d", rc);
- return rc;
- }
+ CalcNTLMv2_response(ses, resp_buf);
- if (!ses->server->ntlmssp.sdeschmacmd5) {
- cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
- return -1;
- }
+ /* now calculate the MAC key for NTLMv2 */
+ hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+ hmac_md5_update(resp_buf, 16, &context);
+ hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
- crypto_shash_setkey(ses->server->ntlmssp.hmacmd5,
- ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
-
- rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
- if (rc) {
- cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n");
- return rc;
- }
-
- crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
- resp_buf, CIFS_HMAC_MD5_HASH_SIZE);
-
- rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
- ses->server->session_key.data.ntlmv2.key);
-
- memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf,
- sizeof(struct ntlmv2_resp));
- ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp);
-
- return rc;
+ memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
+ sizeof(struct ntlmv2_resp));
+ ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
}
-int
-calc_seckey(struct TCP_Server_Info *server)
+void CalcNTLMv2_response(const struct cifsSesInfo *ses,
+ char *v2_session_response)
{
- int rc;
- unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE];
- struct crypto_blkcipher *tfm_arc4;
- struct scatterlist sgin, sgout;
- struct blkcipher_desc desc;
+ struct HMACMD5Context context;
+ /* rest of v2 struct already generated */
+ memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
+ hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
- get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
+ hmac_md5_update(v2_session_response+8,
+ sizeof(struct ntlmv2_resp) - 8, &context);
- tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)",
- 0, CRYPTO_ALG_ASYNC);
- if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
- cERROR(1, "could not allocate " "master crypto API arc4\n");
- return 1;
- }
-
- desc.tfm = tfm_arc4;
-
- crypto_blkcipher_setkey(tfm_arc4,
- server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE);
- sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE);
- sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
- rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
-
- if (!rc)
- memcpy(server->session_key.data.ntlmv2.key,
- sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
-
- crypto_free_blkcipher(tfm_arc4);
-
- return 0;
-}
-
-void
-cifs_crypto_shash_release(struct TCP_Server_Info *server)
-{
- if (server->ntlmssp.md5)
- crypto_free_shash(server->ntlmssp.md5);
-
- if (server->ntlmssp.hmacmd5)
- crypto_free_shash(server->ntlmssp.hmacmd5);
-
- kfree(server->ntlmssp.sdeschmacmd5);
-
- kfree(server->ntlmssp.sdescmd5);
-}
-
-int
-cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
-{
- int rc;
- unsigned int size;
-
- server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
- if (!server->ntlmssp.hmacmd5 ||
- IS_ERR(server->ntlmssp.hmacmd5)) {
- cERROR(1, "could not allocate crypto hmacmd5\n");
- return 1;
- }
-
- server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0);
- if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) {
- cERROR(1, "could not allocate crypto md5\n");
- rc = 1;
- goto cifs_crypto_shash_allocate_ret1;
- }
-
- size = sizeof(struct shash_desc) +
- crypto_shash_descsize(server->ntlmssp.hmacmd5);
- server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
- if (!server->ntlmssp.sdeschmacmd5) {
- cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
- rc = -ENOMEM;
- goto cifs_crypto_shash_allocate_ret2;
- }
- server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5;
- server->ntlmssp.sdeschmacmd5->shash.flags = 0x0;
-
-
- size = sizeof(struct shash_desc) +
- crypto_shash_descsize(server->ntlmssp.md5);
- server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL);
- if (!server->ntlmssp.sdescmd5) {
- cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
- rc = -ENOMEM;
- goto cifs_crypto_shash_allocate_ret3;
- }
- server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5;
- server->ntlmssp.sdescmd5->shash.flags = 0x0;
-
- return 0;
-
-cifs_crypto_shash_allocate_ret3:
- kfree(server->ntlmssp.sdeschmacmd5);
-
-cifs_crypto_shash_allocate_ret2:
- crypto_free_shash(server->ntlmssp.md5);
-
-cifs_crypto_shash_allocate_ret1:
- crypto_free_shash(server->ntlmssp.hmacmd5);
-
- return rc;
+ hmac_md5_final(v2_session_response, &context);
+/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
}
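
The restored signing path above computes the SMB signature as the MD5 of the MAC key followed by the PDU, and the header's 8-byte SecuritySignature field keeps the leading bytes of that digest. A minimal sketch of the construction using the cifs_MD5_* helpers referenced in the patch; the function name and the explicit pdu_len parameter are illustrative, not part of the patch:

/*
 * Sketch only: signature = MD5(mac key || PDU), mirroring the restored
 * cifs_calculate_signature() above.
 */
static int sketch_smb_signature(const struct mac_key *key,
				const unsigned char *pdu, unsigned int pdu_len,
				char signature[16])
{
	struct MD5Context ctx;

	if (key == NULL || pdu == NULL || signature == NULL)
		return -EINVAL;

	cifs_MD5_init(&ctx);
	cifs_MD5_update(&ctx, (char *)&key->data, key->len);	/* key first */
	cifs_MD5_update(&ctx, pdu, pdu_len);			/* then the whole PDU */
	cifs_MD5_final(signature, &ctx);			/* 16-byte digest */
	return 0;
}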
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c9d0cfc..0cdfb8c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -25,9 +25,6 @@
#include <linux/workqueue.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
-#include <crypto/internal/hash.h>
-#include <linux/scatterlist.h>
-
/*
* The sizes of various internal tables and strings
*/
@@ -100,7 +97,7 @@
/* Netbios frames protocol not supported at this time */
};
-struct session_key {
+struct mac_key {
unsigned int len;
union {
char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -123,21 +120,6 @@
struct cifs_ace *aces;
};
-struct sdesc {
- struct shash_desc shash;
- char ctx[];
-};
-
-struct ntlmssp_auth {
- __u32 client_flags;
- __u32 server_flags;
- unsigned char ciphertext[CIFS_CPHTXT_SIZE];
- struct crypto_shash *hmacmd5;
- struct crypto_shash *md5;
- struct sdesc *sdeschmacmd5;
- struct sdesc *sdescmd5;
-};
-
/*
*****************************************************************
* Except the CIFS PDUs themselves all the
@@ -200,14 +182,11 @@
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* needed for CIFS PDU signature */
- struct session_key session_key;
+ struct mac_key mac_signing_key;
char ntlmv2_hash[16];
unsigned long lstrp; /* when we got last response from this server */
u16 dialect; /* dialect index that server chose */
/* extended security flavors that server supports */
- unsigned int tilen; /* length of the target info blob */
- unsigned char *tiblob; /* target info blob in challenge response */
- struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */
bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */
bool sec_kerberosu2u; /* supports U2U Kerberos */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 320e0fd..14d036d 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -134,12 +134,6 @@
* Size of the session key (crypto key encrypted with the password
*/
#define CIFS_SESS_KEY_SIZE (24)
-#define CIFS_CLIENT_CHALLENGE_SIZE (8)
-#define CIFS_SERVER_CHALLENGE_SIZE (8)
-#define CIFS_HMAC_MD5_HASH_SIZE (16)
-#define CIFS_CPHTXT_SIZE (16)
-#define CIFS_NTLMV2_SESSKEY_SIZE (16)
-#define CIFS_NTHASH_SIZE (16)
/*
* Maximum user name length
@@ -669,6 +663,7 @@
__le64 time;
__u64 client_chal; /* random */
__u32 reserved2;
+ struct ntlmssp2_name names[2];
/* array of name entries could follow ending in minimum 4 byte struct */
} __attribute__((packed));
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1378d91..1d60c65 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -87,8 +87,9 @@
extern int decode_negTokenInit(unsigned char *security_blob, int length,
struct TCP_Server_Info *server);
extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
+extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
- unsigned short int port);
+ const unsigned short int port);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
@@ -361,15 +362,13 @@
extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
__u32 *);
extern int cifs_verify_signature(struct smb_hdr *,
- struct TCP_Server_Info *server,
+ const struct mac_key *mac_key,
__u32 expected_sequence_number);
-extern int cifs_calculate_session_key(struct session_key *key, const char *rn,
+extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
const char *pass);
-extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
+extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
const struct nls_table *);
-extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
-extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
-extern int calc_seckey(struct TCP_Server_Info *);
#ifdef CONFIG_CIFS_WEAK_PW_HASH
extern void calc_lanman_hash(const char *password, const char *cryptkey,
bool encrypt, char *lnm_session_key);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4bda920..c65c341 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -604,14 +604,11 @@
else
rc = -EINVAL;
- if (server->secType == Kerberos) {
- if (!server->sec_kerberos &&
- !server->sec_mskerberos)
- rc = -EOPNOTSUPP;
- } else if (server->secType == RawNTLMSSP) {
- if (!server->sec_ntlmssp)
- rc = -EOPNOTSUPP;
- } else
+ if (server->sec_kerberos || server->sec_mskerberos)
+ server->secType = Kerberos;
+ else if (server->sec_ntlmssp)
+ server->secType = RawNTLMSSP;
+ else
rc = -EOPNOTSUPP;
}
} else
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ec0ea4a..88c84a3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -400,7 +400,9 @@
cFYI(1, "call to reconnect done");
csocket = server->ssocket;
continue;
- } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
+ } else if (length == -ERESTARTSYS ||
+ length == -EAGAIN ||
+ length == -EINTR) {
msleep(1); /* minimum sleep to prevent looping
allowing socket to clear and app threads to set
tcpStatus CifsNeedReconnect if server hung */
@@ -414,18 +416,6 @@
} else
continue;
} else if (length <= 0) {
- if (server->tcpStatus == CifsNew) {
- cFYI(1, "tcp session abend after SMBnegprot");
- /* some servers kill the TCP session rather than
- returning an SMB negprot error, in which
- case reconnecting here is not going to help,
- and so simply return error to mount */
- break;
- }
- if (!try_to_freeze() && (length == -EINTR)) {
- cFYI(1, "cifsd thread killed");
- break;
- }
cFYI(1, "Reconnect after unexpected peek error %d",
length);
cifs_reconnect(server);
@@ -466,27 +456,19 @@
an error on SMB negprot response */
cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
pdu_length);
- if (server->tcpStatus == CifsNew) {
- /* if nack on negprot (rather than
- ret of smb negprot error) reconnecting
- not going to help, ret error to mount */
- break;
- } else {
- /* give server a second to
- clean up before reconnect attempt */
- msleep(1000);
- /* always try 445 first on reconnect
- since we get NACK on some if we ever
- connected to port 139 (the NACK is
- since we do not begin with RFC1001
- session initialize frame) */
- server->addr.sockAddr.sin_port =
- htons(CIFS_PORT);
- cifs_reconnect(server);
- csocket = server->ssocket;
- wake_up(&server->response_q);
- continue;
- }
+ /* give server a second to clean up */
+ msleep(1000);
+ /* always try 445 first on reconnect since we get NACK
+ * on some if we ever connected to port 139 (the NACK
+ * is since we do not begin with RFC1001 session
+ * initialize frame)
+ */
+ cifs_set_port((struct sockaddr *)
+ &server->addr.sockAddr, CIFS_PORT);
+ cifs_reconnect(server);
+ csocket = server->ssocket;
+ wake_up(&server->response_q);
+ continue;
} else if (temp != (char) 0) {
cERROR(1, "Unknown RFC 1002 frame");
cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
@@ -522,8 +504,7 @@
total_read += length) {
length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
pdu_length - total_read, 0);
- if ((server->tcpStatus == CifsExiting) ||
- (length == -EINTR)) {
+ if (server->tcpStatus == CifsExiting) {
/* then will exit */
reconnect = 2;
break;
@@ -534,8 +515,9 @@
/* Now we will reread sock */
reconnect = 1;
break;
- } else if ((length == -ERESTARTSYS) ||
- (length == -EAGAIN)) {
+ } else if (length == -ERESTARTSYS ||
+ length == -EAGAIN ||
+ length == -EINTR) {
msleep(1); /* minimum sleep to prevent looping,
allowing socket to clear and app
threads to set tcpStatus
@@ -1708,7 +1690,6 @@
CIFSSMBLogoff(xid, ses);
_FreeXid(xid);
}
- cifs_crypto_shash_release(server);
sesInfoFree(ses);
cifs_put_tcp_session(server);
}
@@ -1725,9 +1706,6 @@
if (ses) {
cFYI(1, "Existing smb sess found (status=%d)", ses->status);
- /* existing SMB ses has a server reference already */
- cifs_put_tcp_session(server);
-
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (rc) {
@@ -1750,6 +1728,9 @@
}
}
mutex_unlock(&ses->session_mutex);
+
+ /* existing SMB ses has a server reference already */
+ cifs_put_tcp_session(server);
FreeXid(xid);
return ses;
}
@@ -1788,23 +1769,13 @@
ses->linux_uid = volume_info->linux_uid;
ses->overrideSecFlg = volume_info->secFlg;
- rc = cifs_crypto_shash_allocate(server);
- if (rc) {
- cERROR(1, "could not setup hash structures rc %d", rc);
- goto get_ses_fail;
- }
- server->tilen = 0;
- server->tiblob = NULL;
-
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
rc = cifs_setup_session(xid, ses, volume_info->local_nls);
mutex_unlock(&ses->session_mutex);
- if (rc) {
- cifs_crypto_shash_release(ses->server);
+ if (rc)
goto get_ses_fail;
- }
/* success, put it on the list */
write_lock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 86a164f..93f77d4 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1462,29 +1462,18 @@
{
char *fromName = NULL;
char *toName = NULL;
- struct cifs_sb_info *cifs_sb_source;
- struct cifs_sb_info *cifs_sb_target;
+ struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
FILE_UNIX_BASIC_INFO *info_buf_target;
int xid, rc, tmprc;
- cifs_sb_target = CIFS_SB(target_dir->i_sb);
- cifs_sb_source = CIFS_SB(source_dir->i_sb);
- tcon = cifs_sb_source->tcon;
+ cifs_sb = CIFS_SB(source_dir->i_sb);
+ tcon = cifs_sb->tcon;
xid = GetXid();
/*
- * BB: this might be allowed if same server, but different share.
- * Consider adding support for this
- */
- if (tcon != cifs_sb_target->tcon) {
- rc = -EXDEV;
- goto cifs_rename_exit;
- }
-
- /*
* we already have the rename sem so we do not need to
* grab it again here to protect the path integrity
*/
@@ -1519,17 +1508,16 @@
info_buf_target = info_buf_source + 1;
tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName,
info_buf_source,
- cifs_sb_source->local_nls,
- cifs_sb_source->mnt_cifs_flags &
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (tmprc != 0)
goto unlink_target;
- tmprc = CIFSSMBUnixQPathInfo(xid, tcon,
- toName, info_buf_target,
- cifs_sb_target->local_nls,
- /* remap based on source sb */
- cifs_sb_source->mnt_cifs_flags &
+ tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName,
+ info_buf_target,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (tmprc == 0 && (info_buf_source->UniqueId ==
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index f978511..9aad47a 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -206,24 +206,28 @@
}
int
+cifs_set_port(struct sockaddr *addr, const unsigned short int port)
+{
+ switch (addr->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)addr)->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+int
cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
const unsigned short int port)
{
if (!cifs_convert_address(dst, src, len))
return 0;
-
- switch (dst->sa_family) {
- case AF_INET:
- ((struct sockaddr_in *)dst)->sin_port = htons(port);
- break;
- case AF_INET6:
- ((struct sockaddr_in6 *)dst)->sin6_port = htons(port);
- break;
- default:
- return 0;
- }
-
- return 1;
+ return cifs_set_port(dst, port);
}
/*****************************************************************************
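
The new cifs_set_port() helper factors the address-family switch out of cifs_fill_sockaddr(), so the reconnect path in connect.c can force the well-known port 445 on an address it has already parsed. A short usage sketch; the local variable and the error message are illustrative:

/* Illustrative usage only: force port 445 on an already-parsed address. */
struct sockaddr_storage ss;	/* previously filled by cifs_convert_address() */

if (!cifs_set_port((struct sockaddr *)&ss, CIFS_PORT))
	cERROR(1, "unsupported address family %d", ss.ss_family);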
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 1db0f07..49c9a4e 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -61,19 +61,6 @@
#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000
#define NTLMSSP_NEGOTIATE_56 0x80000000
-/* Define AV Pair Field IDs */
-#define NTLMSSP_AV_EOL 0
-#define NTLMSSP_AV_NB_COMPUTER_NAME 1
-#define NTLMSSP_AV_NB_DOMAIN_NAME 2
-#define NTLMSSP_AV_DNS_COMPUTER_NAME 3
-#define NTLMSSP_AV_DNS_DOMAIN_NAME 4
-#define NTLMSSP_AV_DNS_TREE_NAME 5
-#define NTLMSSP_AV_FLAGS 6
-#define NTLMSSP_AV_TIMESTAMP 7
-#define NTLMSSP_AV_RESTRICTION 8
-#define NTLMSSP_AV_TARGET_NAME 9
-#define NTLMSSP_AV_CHANNEL_BINDINGS 10
-
/* Although typedefs are not commonly used for structure definitions */
/* in the Linux kernel, in this particular case they are useful */
/* to more closely match the standards document for NTLMSSP from */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 795095f..0a57cb7 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -383,9 +383,6 @@
static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
struct cifsSesInfo *ses)
{
- unsigned int tioffset; /* challeng message target info area */
- unsigned int tilen; /* challeng message target info area length */
-
CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -408,20 +405,6 @@
/* BB spec says that if AvId field of MsvAvTimestamp is populated then
we must set the MIC field of the AUTHENTICATE_MESSAGE */
- ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags);
-
- tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
- tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
- ses->server->tilen = tilen;
- if (tilen) {
- ses->server->tiblob = kmalloc(tilen, GFP_KERNEL);
- if (!ses->server->tiblob) {
- cERROR(1, "Challenge target info allocation failure");
- return -ENOMEM;
- }
- memcpy(ses->server->tiblob, bcc_ptr + tioffset, tilen);
- }
-
return 0;
}
@@ -442,13 +425,12 @@
/* BB is NTLMV2 session security format easier to use here? */
flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
- NTLMSSP_NEGOTIATE_NTLM;
+ NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
if (ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
- flags |= NTLMSSP_NEGOTIATE_SIGN |
- NTLMSSP_NEGOTIATE_KEY_XCH |
- NTLMSSP_NEGOTIATE_EXTENDED_SEC;
- }
+ (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ flags |= NTLMSSP_NEGOTIATE_SIGN;
+ if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
+ flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
sec_blob->NegotiateFlags |= cpu_to_le32(flags);
@@ -469,12 +451,10 @@
struct cifsSesInfo *ses,
const struct nls_table *nls_cp, bool first)
{
- int rc;
- unsigned int size;
AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
__u32 flags;
unsigned char *tmp;
- struct ntlmv2_resp ntlmv2_response = {};
+ char ntlm_session_key[CIFS_SESS_KEY_SIZE];
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
@@ -497,25 +477,19 @@
sec_blob->LmChallengeResponse.Length = 0;
sec_blob->LmChallengeResponse.MaximumLength = 0;
- sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
- rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
- if (rc) {
- cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc);
- goto setup_ntlmv2_ret;
- }
- size = sizeof(struct ntlmv2_resp);
- memcpy(tmp, (char *)&ntlmv2_response, size);
- tmp += size;
- if (ses->server->tilen > 0) {
- memcpy(tmp, ses->server->tiblob, ses->server->tilen);
- tmp += ses->server->tilen;
- } else
- ses->server->tilen = 0;
+ /* calculate session key, BB what about adding similar ntlmv2 path? */
+ SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
+ if (first)
+ cifs_calculate_mac_key(&ses->server->mac_signing_key,
+ ntlm_session_key, ses->password);
- sec_blob->NtChallengeResponse.Length = cpu_to_le16(size +
- ses->server->tilen);
+ memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
+ sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE);
sec_blob->NtChallengeResponse.MaximumLength =
- cpu_to_le16(size + ses->server->tilen);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
+
+ tmp += CIFS_SESS_KEY_SIZE;
if (ses->domainName == NULL) {
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -527,6 +501,7 @@
len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
MAX_USERNAME_SIZE, nls_cp);
len *= 2; /* unicode is 2 bytes each */
+ len += 2; /* trailing null */
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->DomainName.Length = cpu_to_le16(len);
sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -543,6 +518,7 @@
len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
MAX_USERNAME_SIZE, nls_cp);
len *= 2; /* unicode is 2 bytes each */
+ len += 2; /* trailing null */
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->UserName.Length = cpu_to_le16(len);
sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -554,26 +530,9 @@
sec_blob->WorkstationName.MaximumLength = 0;
tmp += 2;
- if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
- !calc_seckey(ses->server)) {
- memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
- sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
- sec_blob->SessionKey.MaximumLength =
- cpu_to_le16(CIFS_CPHTXT_SIZE);
- tmp += CIFS_CPHTXT_SIZE;
- } else {
- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
- sec_blob->SessionKey.Length = 0;
- sec_blob->SessionKey.MaximumLength = 0;
- }
-
- ses->server->sequence_number = 0;
-
-setup_ntlmv2_ret:
- if (ses->server->tilen > 0)
- kfree(ses->server->tiblob);
-
+ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->SessionKey.Length = 0;
+ sec_blob->SessionKey.MaximumLength = 0;
return tmp - pbuffer;
}
@@ -587,14 +546,15 @@
return;
}
-static int setup_ntlmssp_auth_req(char *ntlmsspblob,
+static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
struct cifsSesInfo *ses,
const struct nls_table *nls, bool first_time)
{
int bloblen;
- bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls,
+ bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
first_time);
+ pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
return bloblen;
}
@@ -730,7 +690,7 @@
if (first_time) /* should this be moved into common code
with similar ntlmv2 path? */
- cifs_calculate_session_key(&ses->server->session_key,
+ cifs_calculate_mac_key(&ses->server->mac_signing_key,
ntlm_session_key, ses->password);
/* copy session key */
@@ -769,21 +729,12 @@
cpu_to_le16(sizeof(struct ntlmv2_resp));
/* calculate session key */
- rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
- if (rc) {
- kfree(v2_sess_key);
- goto ssetup_exit;
- }
+ setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
/* FIXME: calculate MAC key */
memcpy(bcc_ptr, (char *)v2_sess_key,
sizeof(struct ntlmv2_resp));
bcc_ptr += sizeof(struct ntlmv2_resp);
kfree(v2_sess_key);
- if (ses->server->tilen > 0) {
- memcpy(bcc_ptr, ses->server->tiblob,
- ses->server->tilen);
- bcc_ptr += ses->server->tilen;
- }
if (ses->capabilities & CAP_UNICODE) {
if (iov[0].iov_len % 2) {
*bcc_ptr = 0;
@@ -814,15 +765,15 @@
}
/* bail out if key is too long */
if (msg->sesskey_len >
- sizeof(ses->server->session_key.data.krb5)) {
+ sizeof(ses->server->mac_signing_key.data.krb5)) {
cERROR(1, "Kerberos signing key too long (%u bytes)",
msg->sesskey_len);
rc = -EOVERFLOW;
goto ssetup_exit;
}
if (first_time) {
- ses->server->session_key.len = msg->sesskey_len;
- memcpy(ses->server->session_key.data.krb5,
+ ses->server->mac_signing_key.len = msg->sesskey_len;
+ memcpy(ses->server->mac_signing_key.data.krb5,
msg->data, msg->sesskey_len);
}
pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
@@ -864,28 +815,12 @@
if (phase == NtLmNegotiate) {
setup_ntlmssp_neg_req(pSMB, ses);
iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
- iov[1].iov_base = &pSMB->req.SecurityBlob[0];
} else if (phase == NtLmAuthenticate) {
int blob_len;
- char *ntlmsspblob;
-
- ntlmsspblob = kmalloc(5 *
- sizeof(struct _AUTHENTICATE_MESSAGE),
- GFP_KERNEL);
- if (!ntlmsspblob) {
- cERROR(1, "Can't allocate NTLMSSP");
- rc = -ENOMEM;
- goto ssetup_exit;
- }
-
- blob_len = setup_ntlmssp_auth_req(ntlmsspblob,
- ses,
- nls_cp,
- first_time);
+ blob_len = setup_ntlmssp_auth_req(pSMB, ses,
+ nls_cp,
+ first_time);
iov[1].iov_len = blob_len;
- iov[1].iov_base = ntlmsspblob;
- pSMB->req.SecurityBlobLength =
- cpu_to_le16(blob_len);
/* Make sure that we tell the server that we
are using the uid that it just gave us back
on the response (challenge) */
@@ -895,6 +830,7 @@
rc = -ENOSYS;
goto ssetup_exit;
}
+ iov[1].iov_base = &pSMB->req.SecurityBlob[0];
/* unicode strings must be word aligned */
if ((iov[0].iov_len + iov[1].iov_len) % 2) {
*bcc_ptr = 0;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index e0588cd..82f78c4 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -543,7 +543,7 @@
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(midQ->resp_buf,
- ses->server,
+ &ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
- ses->server,
+ &ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
- ses->server,
+ &ses->server->mac_signing_key,
midQ->sequence_number+1);
if (rc) {
cERROR(1, "Unexpected SMB signature");
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index de89645..116af75 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -184,8 +184,8 @@
}
/* adjust outsize. is this useful ?? */
- req->uc_outSize = nbytes;
- req->uc_flags |= REQ_WRITE;
+ req->uc_outSize = nbytes;
+ req->uc_flags |= CODA_REQ_WRITE;
count = nbytes;
/* Convert filedescriptor into a file handle */
diff --git a/fs/compat.c b/fs/compat.c
index 718c706..0644a15 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1153,7 +1153,7 @@
{
compat_ssize_t tot_len;
struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov;
+ struct iovec *iov = iovstack;
ssize_t ret;
io_fn_t fn;
iov_fn_t fnv;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 51f270b..48d74c7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -634,7 +634,7 @@
int ret = 0;
if (dio->bio) {
- loff_t cur_offset = dio->block_in_file << dio->blkbits;
+ loff_t cur_offset = dio->cur_page_fs_offset;
loff_t bio_next_offset = dio->logical_offset_in_bio +
dio->bio->bi_size;
@@ -659,7 +659,7 @@
* Submit now if the underlying fs is about to perform a
* metadata read
*/
- if (dio->boundary)
+ else if (dio->boundary)
dio_bio_submit(dio);
}
diff --git a/fs/exec.c b/fs/exec.c
index 2d94552..828dd24 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,6 +376,9 @@
argv++;
if (i++ >= max)
return -E2BIG;
+
+ if (fatal_signal_pending(current))
+ return -ERESTARTNOHAND;
cond_resched();
}
}
@@ -419,6 +422,12 @@
while (len > 0) {
int offset, bytes_to_copy;
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTNOHAND;
+ goto out;
+ }
+ cond_resched();
+
offset = pos % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
@@ -594,6 +603,11 @@
#else
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
+
+ if (unlikely(stack_top < mmap_min_addr) ||
+ unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+ return -ENOMEM;
+
stack_shift = vma->vm_end - stack_top;
bprm->p -= stack_shift;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6769fd0..f8cc34f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -769,11 +769,15 @@
static int __init fcntl_init(void)
{
- /* please add new bits here to ensure allocation uniqueness */
- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ /*
+ * Please add new bits here to ensure allocation uniqueness.
+ * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
+ * is defined as O_NONBLOCK on some platforms and not on others.
+ */
+ BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
- O_TRUNC | O_APPEND | O_NONBLOCK |
+ O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
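
The BUILD_BUG_ON above is a uniqueness check: if every O_* flag occupies its own bit, OR-ing them together yields exactly one set bit per flag, so the population count of the OR (HWEIGHT32) must equal the number of flags, minus one because O_RDONLY is 0. A tiny userspace sketch of the same trick, with made-up flag values:

#include <assert.h>

/* Hypothetical flag values, chosen only to illustrate the check. */
#define F_A 0x01
#define F_B 0x02
#define F_C 0x04

int main(void)
{
	unsigned int all = F_A | F_B | F_C;

	/* __builtin_popcount() plays the role of HWEIGHT32(): one bit per flag. */
	assert(__builtin_popcount(all) == 3);
	return 0;
}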
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 7d9d06b..5581122 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -52,8 +52,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>
-#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
-
/*
* We don't actually have pdflush, but this one is exported though /proc...
*/
@@ -71,6 +69,27 @@
return test_bit(BDI_writeback_running, &bdi->state);
}
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+
+ /*
+ * For inodes on standard filesystems, we use superblock's bdi. For
+ * inodes on virtual filesystems, we want to use inode mapping's bdi
+ * because they can possibly point to something useful (think about
+ * block_dev filesystem).
+ */
+ if (sb->s_bdi && sb->s_bdi != &noop_backing_dev_info) {
+ /* Some device inodes could play dirty tricks. Catch them... */
+ WARN(bdi != sb->s_bdi && bdi_cap_writeback_dirty(bdi),
+ "Dirtiable inode bdi %s != sb bdi %s\n",
+ bdi->name, sb->s_bdi->name);
+ return sb->s_bdi;
+ }
+ return bdi;
+}
+
static void bdi_queue_work(struct backing_dev_info *bdi,
struct wb_writeback_work *work)
{
@@ -808,7 +827,7 @@
wb->last_active = jiffies;
set_current_state(TASK_INTERRUPTIBLE);
- if (!list_empty(&bdi->work_list)) {
+ if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
continue;
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 69ad053..d367af1 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -276,7 +276,7 @@
* Called with fc->lock, unlocks it
*/
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
{
void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
req->end = NULL;
@@ -306,8 +306,8 @@
static void wait_answer_interruptible(struct fuse_conn *fc,
struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
if (signal_pending(current))
return;
@@ -325,8 +325,8 @@
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
if (!fc->no_interrupt) {
/* Any signal may interrupt this */
@@ -905,8 +905,8 @@
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
DECLARE_WAITQUEUE(wait, current);
@@ -934,7 +934,7 @@
*/
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
size_t nbytes, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
{
struct fuse_in_header ih;
struct fuse_interrupt_in arg;
@@ -1720,8 +1720,8 @@
* This function releases and reacquires fc->lock
*/
static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
while (!list_empty(head)) {
struct fuse_req *req;
@@ -1744,8 +1744,8 @@
* locked).
*/
static void end_io_requests(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
while (!list_empty(&fc->io)) {
struct fuse_req *req =
@@ -1769,6 +1769,16 @@
}
}
+static void end_queued_requests(struct fuse_conn *fc)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+ fc->max_background = UINT_MAX;
+ flush_bg_queue(fc);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
+}
+
/*
* Abort all requests.
*
@@ -1795,8 +1805,7 @@
fc->connected = 0;
fc->blocked = 0;
end_io_requests(fc);
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ end_queued_requests(fc);
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@
if (fc) {
spin_lock(&fc->lock);
fc->connected = 0;
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ fc->blocked = 0;
+ end_queued_requests(fc);
+ wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
fuse_conn_put(fc);
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 147c1f7..c822458 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1144,8 +1144,8 @@
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
struct fuse_inode *fi = get_fuse_inode(req->inode);
loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@
* Called with fc->lock
*/
void fuse_flush_writepages(struct inode *inode)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
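
The fuse hunks above only normalize the sparse lock annotations from __releases(&fc->lock) to __releases(fc->lock). These macros expand to sparse context attributes, which let sparse check that a function which temporarily drops a lock reacquires it before returning. A minimal sketch of the convention; the struct and function names are illustrative:

#include <linux/spinlock.h>

struct my_ctx {			/* illustrative type, not from the patch */
	spinlock_t lock;
};

/* Must be entered with ctx->lock held; drops it, then takes it again. */
static void drop_and_retake(struct my_ctx *ctx)
__releases(ctx->lock)
__acquires(ctx->lock)
{
	spin_unlock(&ctx->lock);
	/* ... work done without the lock held ... */
	spin_lock(&ctx->lock);
}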
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index cde1248..ac750bd 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -932,7 +932,7 @@
do {
prepare_to_wait(&sdp->sd_logd_waitq, &wait,
- TASK_UNINTERRUPTIBLE);
+ TASK_INTERRUPTIBLE);
if (!gfs2_ail_flush_reqd(sdp) &&
!gfs2_jrnl_flush_reqd(sdp) &&
!kthread_should_stop())
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index e20ee85..f3f3578 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -115,7 +115,7 @@
inode_inc_link_count(dir);
- inode = minix_new_inode(dir, mode, &err);
+ inode = minix_new_inode(dir, S_IFDIR | mode, &err);
if (!inode)
goto out_dir;
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 6c2aad4..f7e13db 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -63,6 +63,7 @@
config NFS_V4
bool "NFS client support for NFS version 4"
depends on NFS_FS
+ select SUNRPC_GSS
help
This option enables support for version 4 of the NFS protocol
(RFC 3530) in the kernel's NFS client.
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 4e7df2a..e734072 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -275,7 +275,7 @@
sin1->sin6_scope_id != sin2->sin6_scope_id)
return 0;
- return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr);
+ return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
}
#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index eb51bd6..05bf3c0 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -723,10 +723,6 @@
default:
BUG();
}
- if (res < 0)
- dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
- " - error %d!\n",
- __func__, res);
return res;
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ec3966e..f4cbf0c 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -431,7 +431,15 @@
goto out_err;
error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
+ if (unlikely(error == -ESTALE)) {
+ struct dentry *pd_dentry;
+ pd_dentry = dget_parent(dentry);
+ if (pd_dentry != NULL) {
+ nfs_zap_caches(pd_dentry->d_inode);
+ dput(pd_dentry);
+ }
+ }
nfs_free_fattr(res.fattr);
if (error < 0)
goto out_err;
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 95932f5..4264377 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -69,6 +69,7 @@
depends on NFSD && PROC_FS && EXPERIMENTAL
select NFSD_V3
select FS_POSIX_ACL
+ select SUNRPC_GSS
help
This option enables support in your system's NFS server for
version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3dfef06..cf0d2ff 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -440,7 +440,7 @@
static int nfs4_access_to_omode(u32 access)
{
- switch (access) {
+ switch (access & NFS4_SHARE_ACCESS_BOTH) {
case NFS4_SHARE_ACCESS_READ:
return O_RDONLY;
case NFS4_SHARE_ACCESS_WRITE:
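
The low two bits of the share_access word encode the access mode (NFS4_SHARE_ACCESS_READ, _WRITE, or _BOTH, which is their OR), so masking with NFS4_SHARE_ACCESS_BOTH keeps the switch keyed on just those bits even when a caller passes a value carrying other bits. A hedged sketch of the mapping; the fallback return is illustrative, and the in-tree function handles the default case differently:

/* Sketch only: map the two share-access bits to an open mode. */
static int sketch_access_to_omode(u32 share_access)
{
	switch (share_access & NFS4_SHARE_ACCESS_BOTH) {	/* BOTH == READ | WRITE */
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	return -EINVAL;		/* illustrative fallback */
}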
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index a76e0aa..3919150 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -209,7 +209,10 @@
}
inode->i_mode = new_mode;
+ inode->i_ctime = CURRENT_TIME;
di->i_mode = cpu_to_le16(inode->i_mode);
+ di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 215e12c..592fae5 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6672,7 +6672,7 @@
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_CACHE_SHIFT;
do {
- pages[numpages] = grab_cache_page(mapping, index);
+ pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
if (!pages[numpages]) {
ret = -ENOMEM;
mlog_errno(ret);
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index ec6d123..c7ee03c 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -439,7 +439,7 @@
ocfs2_blockcheck_inc_failure(stats);
mlog(ML_ERROR,
- "CRC32 failed: stored: %u, computed %u. Applying ECC.\n",
+ "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
(unsigned int)check.bc_crc32e, (unsigned int)crc);
/* Ok, try ECC fixups */
@@ -453,7 +453,7 @@
goto out;
}
- mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
+ mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
(unsigned int)check.bc_crc32e, (unsigned int)crc);
rc = -EIO;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1361997..cbe2f05 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -977,7 +977,7 @@
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
size_t caller_veclen, u8 target_node, int *status)
{
- int ret;
+ int ret = 0;
struct o2net_msg *msg = NULL;
size_t veclen, caller_bytes = 0;
struct kvec *vec = NULL;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f04ebcf..c49f6de 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3931,6 +3931,15 @@
goto out_commit;
}
+ cpos = split_hash;
+ ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
+ data_ac, meta_ac, new_dx_leaves,
+ num_dx_leaves);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
for (i = 0; i < num_dx_leaves; i++) {
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
orig_dx_leaves[i],
@@ -3939,15 +3948,14 @@
mlog_errno(ret);
goto out_commit;
}
- }
- cpos = split_hash;
- ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
- data_ac, meta_ac, new_dx_leaves,
- num_dx_leaves);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
+ ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
+ new_dx_leaves[i],
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
}
ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 4b6ae2c..7652989 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1030,6 +1030,7 @@
struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 5efdd37..901ca52 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -636,8 +636,14 @@
spin_lock(&dlm->track_lock);
if (oldres)
track_list = &oldres->tracking;
- else
+ else {
track_list = &dlm->tracking_list;
+ if (list_empty(track_list)) {
+ dl = NULL;
+ spin_unlock(&dlm->track_lock);
+ goto bail;
+ }
+ }
list_for_each_entry(res, track_list, tracking) {
if (&res->tracking == &dlm->tracking_list)
@@ -660,6 +666,7 @@
} else
dl = NULL;
+bail:
/* passed to seq_show */
return dl;
}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 153abb5..11a5c87 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -693,6 +693,7 @@
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
+ dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
dlm_put(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ffb4c68..f564b0e 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3433,3 +3433,43 @@
wake_up(&res->wq);
wake_up(&dlm->migration_wq);
}
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+ int i;
+ struct hlist_head *bucket;
+ struct dlm_master_list_entry *mle;
+ struct hlist_node *tmp, *list;
+
+ /*
+ * We notified all other nodes that we are exiting the domain and
+ * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
+ * around we force free them and wake any processes that are waiting
+ * on the mles
+ */
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+
+ BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
+ BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
+
+ for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+ bucket = dlm_master_hash(dlm, i);
+ hlist_for_each_safe(list, tmp, bucket) {
+ mle = hlist_entry(list, struct dlm_master_list_entry,
+ master_hash_node);
+ if (mle->type != DLM_MLE_BLOCK) {
+ mlog(ML_ERROR, "bad mle: %p\n", mle);
+ dlm_print_one_mle(mle);
+ }
+ atomic_set(&mle->woken, 1);
+ wake_up(&mle->wq);
+
+ __dlm_unlink_mle(dlm, mle);
+ __dlm_mle_detach_hb_events(dlm, mle);
+ __dlm_put_mle(mle);
+ }
+ }
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+}
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index d1ce48e..1d596d8 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -84,6 +84,7 @@
OI_LS_PARENT,
OI_LS_RENAME1,
OI_LS_RENAME2,
+ OI_LS_REFLINK_TARGET,
};
int ocfs2_dlm_init(struct ocfs2_super *osb);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 81296b4..9a03c15 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -36,6 +36,7 @@
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
+#include <linux/blkdev.h>
#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
@@ -190,8 +191,16 @@
if (err)
goto bail;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
+ /*
+ * We still have to flush drive's caches to get data to the
+ * platter
+ */
+ if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+ NULL, BLKDEV_IFL_WAIT);
goto bail;
+ }
journal = osb->journal->j_journal;
err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@
BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
BUG_ON(abs_from & (inode->i_blkbits - 1));
- page = grab_cache_page(mapping, index);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
mlog_errno(ret);
@@ -2329,7 +2338,7 @@
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
- ((file->f_flags & O_DIRECT) && has_refcount)) {
+ ((file->f_flags & O_DIRECT) && !direct_io)) {
ret = filemap_fdatawrite_range(file->f_mapping, pos,
pos + count - 1);
if (ret < 0)
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0492464..eece3e0 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -488,7 +488,11 @@
OCFS2_BH_IGNORE_CACHE);
} else {
status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
- if (!status)
+ /*
+ * If buffer is in jbd, then its checksum may not have been
+ * computed as yet.
+ */
+ if (!status && !buffer_jbd(bh))
status = ocfs2_validate_inode_block(osb->sb, bh);
}
if (status < 0) {
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index af2b8fe..4c18f4a 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -74,9 +74,11 @@
/*
* Another node might have truncated while we were waiting on
* cluster locks.
+ * We don't check size == 0 before the shift. This is borrowed
+ * from do_generic_file_read.
*/
- last_index = size >> PAGE_CACHE_SHIFT;
- if (page->index > last_index) {
+ last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+ if (unlikely(!size || page->index > last_index)) {
ret = -EINVAL;
goto out;
}
@@ -107,7 +109,7 @@
* because the "write" would invalidate their data.
*/
if (page->index == last_index)
- len = size & ~PAGE_CACHE_MASK;
+ len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
&fsdata, di_bh, page);
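
The new last_index and len calculations above handle an i_size that sits exactly on a page boundary: size >> PAGE_CACHE_SHIFT would point one page past the last valid page and size & ~PAGE_CACHE_MASK would give a zero length, while the (size - 1) forms give the real last page and its full length. A small worked example assuming 4 KiB pages; the macro names below are local stand-ins, not the kernel ones:

#include <stdio.h>

#define PG_SHIFT 12			/* 4 KiB pages */
#define PG_SIZE  (1UL << PG_SHIFT)
#define PG_MASK  (~(PG_SIZE - 1))	/* plays the role of PAGE_CACHE_MASK */

int main(void)
{
	unsigned long size = 2 * PG_SIZE;	/* i_size exactly on a page boundary */

	printf("old last_index: %lu\n", size >> PG_SHIFT);		/* 2: one page too far */
	printf("new last_index: %lu\n", (size - 1) >> PG_SHIFT);	/* 1: the real last page */
	printf("old len:        %lu\n", size & ~PG_MASK);		/* 0: nothing would be written */
	printf("new len:        %lu\n", ((size - 1) & ~PG_MASK) + 1);	/* 4096: the full last page */
	return 0;
}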
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f171b51..a00dda2 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -472,32 +472,23 @@
return status;
}
-static int ocfs2_mknod_locked(struct ocfs2_super *osb,
- struct inode *dir,
- struct inode *inode,
- dev_t dev,
- struct buffer_head **new_fe_bh,
- struct buffer_head *parent_fe_bh,
- handle_t *handle,
- struct ocfs2_alloc_context *inode_ac)
+static int __ocfs2_mknod_locked(struct inode *dir,
+ struct inode *inode,
+ dev_t dev,
+ struct buffer_head **new_fe_bh,
+ struct buffer_head *parent_fe_bh,
+ handle_t *handle,
+ struct ocfs2_alloc_context *inode_ac,
+ u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
{
int status = 0;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dinode *fe = NULL;
struct ocfs2_extent_list *fel;
- u64 suballoc_loc, fe_blkno = 0;
- u16 suballoc_bit;
u16 feat;
*new_fe_bh = NULL;
- status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
- inode_ac, &suballoc_loc,
- &suballoc_bit, &fe_blkno);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
-
/* populate as many fields early on as possible - many of
* these are used by the support functions here and in
* callers. */
@@ -591,6 +582,34 @@
return status;
}
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct inode *inode,
+ dev_t dev,
+ struct buffer_head **new_fe_bh,
+ struct buffer_head *parent_fe_bh,
+ handle_t *handle,
+ struct ocfs2_alloc_context *inode_ac)
+{
+ int status = 0;
+ u64 suballoc_loc, fe_blkno = 0;
+ u16 suballoc_bit;
+
+ *new_fe_bh = NULL;
+
+ status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
+ inode_ac, &suballoc_loc,
+ &suballoc_bit, &fe_blkno);
+ if (status < 0) {
+ mlog_errno(status);
+ return status;
+ }
+
+ return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+ parent_fe_bh, handle, inode_ac,
+ fe_blkno, suballoc_loc, suballoc_bit);
+}
+
static int ocfs2_mkdir(struct inode *dir,
struct dentry *dentry,
int mode)
@@ -1852,61 +1871,117 @@
return status;
}
+static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
+ struct inode **ret_orphan_dir,
+ struct buffer_head **ret_orphan_dir_bh)
+{
+ struct inode *orphan_dir_inode;
+ struct buffer_head *orphan_dir_bh = NULL;
+ int ret = 0;
+
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ osb->slot_num);
+ if (!orphan_dir_inode) {
+ ret = -ENOENT;
+ mlog_errno(ret);
+ return ret;
+ }
+
+ mutex_lock(&orphan_dir_inode->i_mutex);
+
+ ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+ if (ret < 0) {
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ iput(orphan_dir_inode);
+
+ mlog_errno(ret);
+ return ret;
+ }
+
+ *ret_orphan_dir = orphan_dir_inode;
+ *ret_orphan_dir_bh = orphan_dir_bh;
+
+ return 0;
+}
+
+static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
+ struct buffer_head *orphan_dir_bh,
+ u64 blkno,
+ char *name,
+ struct ocfs2_dir_lookup_result *lookup)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
+
+ ret = ocfs2_blkno_stringify(blkno, name);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
+ orphan_dir_bh, name,
+ OCFS2_ORPHAN_NAMELEN, lookup);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
+ * insertion of an orphan.
+ * @osb: ocfs2 file system
+ * @ret_orphan_dir: Orphan dir inode - returned locked!
+ * @blkno: Actual block number of the inode to be inserted into orphan dir.
+ * @lookup: dir lookup result, to be passed back into functions like
+ * ocfs2_orphan_add
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure.
+ */
static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
u64 blkno,
char *name,
struct ocfs2_dir_lookup_result *lookup)
{
- struct inode *orphan_dir_inode;
+ struct inode *orphan_dir_inode = NULL;
struct buffer_head *orphan_dir_bh = NULL;
- int status = 0;
+ int ret = 0;
- status = ocfs2_blkno_stringify(blkno, name);
- if (status < 0) {
- mlog_errno(status);
- return status;
+ ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
+ &orphan_dir_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
}
- orphan_dir_inode = ocfs2_get_system_file_inode(osb,
- ORPHAN_DIR_SYSTEM_INODE,
- osb->slot_num);
- if (!orphan_dir_inode) {
- status = -ENOENT;
- mlog_errno(status);
- return status;
- }
-
- mutex_lock(&orphan_dir_inode->i_mutex);
-
- status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
-
- status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
- orphan_dir_bh, name,
- OCFS2_ORPHAN_NAMELEN, lookup);
- if (status < 0) {
- ocfs2_inode_unlock(orphan_dir_inode, 1);
-
- mlog_errno(status);
- goto leave;
+ ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
+ blkno, name, lookup);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
}
*ret_orphan_dir = orphan_dir_inode;
-leave:
- if (status) {
+out:
+ brelse(orphan_dir_bh);
+
+ if (ret) {
+ ocfs2_inode_unlock(orphan_dir_inode, 1);
mutex_unlock(&orphan_dir_inode->i_mutex);
iput(orphan_dir_inode);
}
- brelse(orphan_dir_bh);
-
- mlog_exit(status);
- return status;
+ mlog_exit(ret);
+ return ret;
}
static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@
return status;
}
+/**
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
+ * allocated file. This is different from the typical 'add to orphan dir'
+ * operation in that the inode does not yet exist. This is a problem because
+ * the orphan dir stringifies the inode block number to come up with its
+ * dirent. Obviously if the inode does not yet exist we have a chicken-and-egg
+ * problem. This function works around it by calling deeper into the orphan
+ * and suballoc code than other callers. Use this only when necessary.
+ * @dir: The directory which this inode will ultimately wind up under - not the
+ * orphan dir!
+ * @dir_bh: buffer_head of the @dir inode block
+ * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
+ * with the string to be used for orphan dirent. Pass back to the orphan dir
+ * code.
+ * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
+ * dir code.
+ * @ret_di_blkno: block number where the new inode will be allocated.
+ * @orphan_insert: Dir insert context to be passed back into orphan dir code.
+ * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure.
+ */
+static int ocfs2_prep_new_orphaned_file(struct inode *dir,
+ struct buffer_head *dir_bh,
+ char *orphan_name,
+ struct inode **ret_orphan_dir,
+ u64 *ret_di_blkno,
+ struct ocfs2_dir_lookup_result *orphan_insert,
+ struct ocfs2_alloc_context **ret_inode_ac)
+{
+ int ret;
+ u64 di_blkno;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+ struct inode *orphan_dir = NULL;
+ struct buffer_head *orphan_dir_bh = NULL;
+ struct ocfs2_alloc_context *inode_ac = NULL;
+
+ ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ /* reserve an inode spot */
+ ret = ocfs2_reserve_new_inode(osb, &inode_ac);
+ if (ret < 0) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
+ &di_blkno);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
+ di_blkno, orphan_name, orphan_insert);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+out:
+ if (ret == 0) {
+ *ret_orphan_dir = orphan_dir;
+ *ret_di_blkno = di_blkno;
+ *ret_inode_ac = inode_ac;
+ /*
+ * orphan_name and orphan_insert are already up to
+ * date via prepare_orphan_dir
+ */
+ } else {
+ /* Unroll reserve_new_inode* */
+ if (inode_ac)
+ ocfs2_free_alloc_context(inode_ac);
+
+ /* Unroll orphan dir locking */
+ mutex_unlock(&orphan_dir->i_mutex);
+ ocfs2_inode_unlock(orphan_dir, 1);
+ iput(orphan_dir);
+ }
+
+ brelse(orphan_dir_bh);
+
+ return ret;
+}
+
int ocfs2_create_inode_in_orphan(struct inode *dir,
int mode,
struct inode **new_inode)
@@ -2068,6 +2236,8 @@
struct buffer_head *new_di_bh = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+ u64 uninitialized_var(di_blkno), suballoc_loc;
+ u16 suballoc_bit;
status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
if (status < 0) {
@@ -2076,20 +2246,9 @@
return status;
}
- /*
- * We give the orphan dir the root blkno to fake an orphan name,
- * and allocate enough space for our insertion.
- */
- status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
- osb->root_blkno,
- orphan_name, &orphan_insert);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
-
- /* reserve an inode spot */
- status = ocfs2_reserve_new_inode(osb, &inode_ac);
+ status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
+ orphan_name, &orphan_dir,
+ &di_blkno, &orphan_insert, &inode_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -2116,17 +2275,20 @@
goto leave;
did_quota_inode = 1;
- inode->i_nlink = 0;
- /* do the real work now. */
- status = ocfs2_mknod_locked(osb, dir, inode,
- 0, &new_di_bh, parent_di_bh, handle,
- inode_ac);
+ status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
+ &suballoc_loc,
+ &suballoc_bit, di_blkno);
if (status < 0) {
mlog_errno(status);
goto leave;
}
- status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name);
+ inode->i_nlink = 0;
+ /* do the real work now. */
+ status = __ocfs2_mknod_locked(dir, inode,
+ 0, &new_di_bh, parent_di_bh, handle,
+ inode_ac, di_blkno, suballoc_loc,
+ suballoc_bit);
if (status < 0) {
mlog_errno(status);
goto leave;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 33f1c9a..fa31d05 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -235,18 +235,31 @@
#define OCFS2_HAS_REFCOUNT_FL (0x0010)
/* Inode attributes, keep in sync with EXT2 */
-#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */
-#define OCFS2_UNRM_FL (0x00000002) /* Undelete */
-#define OCFS2_COMPR_FL (0x00000004) /* Compress file */
-#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */
-#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */
-#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */
-#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */
-#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */
-#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */
+#define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */
+#define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */
+#define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */
+#define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
+#define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
+#define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
+#define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
+#define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
+/* Reserved for compression usage... */
+#define OCFS2_DIRTY_FL FS_DIRTY_FL
+#define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
+#define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
+#define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
+/* End compression flags --- maybe not all used */
+#define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */
+#define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
+#define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
+#define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
+#define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
+#define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
+#define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies */
+#define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
-#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */
-#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */
+#define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
+#define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
/*
* Extent record flags (e_node.leaf.flags)
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
index 2d3420a..5d24150 100644
--- a/fs/ocfs2/ocfs2_ioctl.h
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -23,10 +23,10 @@
/*
* ioctl commands
*/
-#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
+#define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS
+#define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS
+#define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
+#define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
/*
* Space reservation / allocation / free ioctls and argument structure
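
With OCFS2_IOC_GETFLAGS/SETFLAGS now aliased to the generic FS_IOC_* numbers, ordinary chattr/lsattr-style tooling can drive ocfs2 files directly. A small standalone userspace sketch that reads a file's attribute flags; the default path is only a placeholder:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FS_IOC_GETFLAGS, FS_IMMUTABLE_FL, ... */

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "testfile";   /* placeholder */
        long flags = 0;
        int fd = open(path, O_RDONLY);

        if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
                perror(path);
                return 1;
        }

        printf("%s: flags %#lx%s\n", path, flags,
               (flags & FS_IMMUTABLE_FL) ? " (immutable)" : "");
        close(fd);
        return 0;
}
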
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 73a11cc..efdd756 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2960,7 +2960,7 @@
if (map_end & (PAGE_CACHE_SIZE - 1))
to = map_end & (PAGE_CACHE_SIZE - 1);
- page = grab_cache_page(mapping, page_index);
+ page = find_or_create_page(mapping, page_index, GFP_NOFS);
/*
* In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -3179,7 +3179,8 @@
if (map_end > end)
map_end = end;
- page = grab_cache_page(context->inode->i_mapping, page_index);
+ page = find_or_create_page(context->inode->i_mapping,
+ page_index, GFP_NOFS);
BUG_ON(!page);
wait_on_page_writeback(page);
@@ -4200,8 +4201,9 @@
goto out;
}
- mutex_lock(&new_inode->i_mutex);
- ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
+ mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
+ ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
+ OI_LS_REFLINK_TARGET);
if (ret) {
mlog_errno(ret);
goto out_unlock;
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index d8b6e42..3e78db3 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -732,25 +732,23 @@
struct ocfs2_alloc_reservation *resv,
int *cstart, int *clen)
{
- unsigned int wanted = *clen;
-
if (resv == NULL || ocfs2_resmap_disabled(resmap))
return -ENOSPC;
spin_lock(&resv_lock);
- /*
- * We don't want to over-allocate for temporary
- * windows. Otherwise, we run the risk of fragmenting the
- * allocation space.
- */
- wanted = ocfs2_resv_window_bits(resmap, resv);
- if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
- wanted = *clen;
-
if (ocfs2_resv_empty(resv)) {
- mlog(0, "empty reservation, find new window\n");
+ /*
+ * We don't want to over-allocate for temporary
+ * windows. Otherwise, we run the risk of fragmenting the
+ * allocation space.
+ */
+ unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
+ if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
+ wanted = *clen;
+
+ mlog(0, "empty reservation, find new window\n");
/*
* Try to get a window here. If it works, we must fall
* through and test the bitmap . This avoids some
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index a8e6a95..849c2f0 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -57,11 +57,28 @@
u64 sr_bg_blkno; /* The bg we allocated from. Set
to 0 when a block group is
contiguous. */
+ u64 sr_bg_stable_blkno; /*
+ * Doesn't change, always
+ * set to target block
+ * group descriptor
+ * block.
+ */
u64 sr_blkno; /* The first allocated block */
unsigned int sr_bit_offset; /* The bit in the bg */
unsigned int sr_bits; /* How many bits we claimed */
};
+static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
+{
+ if (res->sr_blkno == 0)
+ return 0;
+
+ if (res->sr_bg_blkno)
+ return res->sr_bg_blkno;
+
+ return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
+}
+
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@
brelse(ac->ac_bh);
ac->ac_bh = NULL;
ac->ac_resv = NULL;
+ if (ac->ac_find_loc_priv) {
+ kfree(ac->ac_find_loc_priv);
+ ac->ac_find_loc_priv = NULL;
+ }
}
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -336,7 +357,7 @@
static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
struct ocfs2_group_desc *bg,
struct ocfs2_chain_list *cl,
- u64 p_blkno, u32 clusters)
+ u64 p_blkno, unsigned int clusters)
{
struct ocfs2_extent_list *el = &bg->bg_list;
struct ocfs2_extent_rec *rec;
@@ -348,7 +369,7 @@
rec->e_blkno = cpu_to_le64(p_blkno);
rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
le16_to_cpu(cl->cl_bpc));
- rec->e_leaf_clusters = cpu_to_le32(clusters);
+ rec->e_leaf_clusters = cpu_to_le16(clusters);
le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
le16_add_cpu(&bg->bg_free_bits_count,
clusters * le16_to_cpu(cl->cl_bpc));
@@ -1678,6 +1699,15 @@
if (!ret)
ocfs2_bg_discontig_fix_result(ac, gd, res);
+ /*
+ * sr_bg_blkno might have been changed by
+ * ocfs2_bg_discontig_fix_result
+ */
+ res->sr_bg_stable_blkno = group_bh->b_blocknr;
+
+ if (ac->ac_find_loc_only)
+ goto out_loc_only;
+
ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
res->sr_bits,
le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@
if (ret < 0)
mlog_errno(ret);
+out_loc_only:
*bits_left = le16_to_cpu(gd->bg_free_bits_count);
out:
@@ -1708,7 +1739,6 @@
{
int status;
u16 chain;
- u32 tmp_used;
u64 next_group;
struct inode *alloc_inode = ac->ac_inode;
struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@
if (!status)
ocfs2_bg_discontig_fix_result(ac, bg, res);
+ /*
+ * sr_bg_blkno might have been changed by
+ * ocfs2_bg_discontig_fix_result
+ */
+ res->sr_bg_stable_blkno = group_bh->b_blocknr;
/*
* Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@
}
}
- /* Ok, claim our bits now: set the info on dinode, chainlist
- * and then the group */
- status = ocfs2_journal_access_di(handle,
- INODE_CACHE(alloc_inode),
- ac->ac_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (status < 0) {
+ if (ac->ac_find_loc_only)
+ goto out_loc_only;
+
+ status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
+ ac->ac_bh, res->sr_bits,
+ chain);
+ if (status) {
mlog_errno(status);
goto bail;
}
- tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
- fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
- le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
- ocfs2_journal_dirty(handle, ac->ac_bh);
-
status = ocfs2_block_group_set_bits(handle,
alloc_inode,
bg,
@@ -1826,6 +1856,7 @@
mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
(unsigned long long)le64_to_cpu(fe->i_blkno));
+out_loc_only:
*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
brelse(group_bh);
@@ -1845,6 +1876,7 @@
int status;
u16 victim, i;
u16 bits_left = 0;
+ u64 hint = ac->ac_last_group;
struct ocfs2_chain_list *cl;
struct ocfs2_dinode *fe;
@@ -1872,7 +1904,7 @@
goto bail;
}
- res->sr_bg_blkno = ac->ac_last_group;
+ res->sr_bg_blkno = hint;
if (res->sr_bg_blkno) {
/* Attempt to short-circuit the usual search mechanism
* by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
- if (!status)
+ if (!status) {
+ hint = ocfs2_group_from_res(res);
goto set_hint;
+ }
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
@@ -1920,8 +1954,10 @@
ac->ac_chain = i;
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
- if (!status)
+ if (!status) {
+ hint = ocfs2_group_from_res(res);
break;
+ }
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
@@ -1936,7 +1972,7 @@
if (bits_left < min_bits)
ac->ac_last_group = 0;
else
- ac->ac_last_group = res->sr_bg_blkno;
+ ac->ac_last_group = hint;
}
bail:
@@ -2016,6 +2052,136 @@
OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
}
+int ocfs2_find_new_inode_loc(struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *ac,
+ u64 *fe_blkno)
+{
+ int ret;
+ handle_t *handle = NULL;
+ struct ocfs2_suballoc_result *res;
+
+ BUG_ON(!ac);
+ BUG_ON(ac->ac_bits_given != 0);
+ BUG_ON(ac->ac_bits_wanted != 1);
+ BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+
+ res = kzalloc(sizeof(*res), GFP_NOFS);
+ if (res == NULL) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
+
+ /*
+ * The handle started here is for chain relink. Alternatively,
+ * we could just disable relink for these calls.
+ */
+ handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * This will instruct ocfs2_claim_suballoc_bits and
+ * ocfs2_search_one_group to search but save actual allocation
+ * for later.
+ */
+ ac->ac_find_loc_only = 1;
+
+ ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ac->ac_find_loc_priv = res;
+ *fe_blkno = res->sr_blkno;
+
+out:
+ if (handle)
+ ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
+
+ if (ret)
+ kfree(res);
+
+ return ret;
+}
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_alloc_context *ac,
+ u64 *suballoc_loc,
+ u16 *suballoc_bit,
+ u64 di_blkno)
+{
+ int ret;
+ u16 chain;
+ struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
+ struct buffer_head *bg_bh = NULL;
+ struct ocfs2_group_desc *bg;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+
+ /*
+ * Since di_blkno is being passed back in, we check for any
+ * inconsistencies which may have happened between
+ * calls. These are code bugs as di_blkno is not expected to
+ * change once returned from ocfs2_find_new_inode_loc()
+ */
+ BUG_ON(res->sr_blkno != di_blkno);
+
+ ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
+ res->sr_bg_stable_blkno, &bg_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ chain = le16_to_cpu(bg->bg_chain);
+
+ ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
+ ac->ac_bh, res->sr_bits,
+ chain);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_block_group_set_bits(handle,
+ ac->ac_inode,
+ bg,
+ bg_bh,
+ res->sr_bit_offset,
+ res->sr_bits);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
+ (unsigned long long)di_blkno);
+
+ atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ BUG_ON(res->sr_bits != 1);
+
+ *suballoc_loc = res->sr_bg_blkno;
+ *suballoc_bit = res->sr_bit_offset;
+ ac->ac_bits_given++;
+ ocfs2_save_inode_ac_group(dir, ac);
+
+out:
+ brelse(bg_bh);
+
+ return ret;
+}
+
int ocfs2_claim_new_inode(handle_t *handle,
struct inode *dir,
struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@
* suballoc_bit.
*/
static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
- u16 *suballoc_slot, u16 *suballoc_bit)
+ u16 *suballoc_slot, u64 *group_blkno,
+ u16 *suballoc_bit)
{
int status;
struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@
*suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
if (suballoc_bit)
*suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
+ if (group_blkno)
+ *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
bail:
brelse(inode_bh);
@@ -2621,7 +2790,8 @@
*/
static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
struct inode *suballoc,
- struct buffer_head *alloc_bh, u64 blkno,
+ struct buffer_head *alloc_bh,
+ u64 group_blkno, u64 blkno,
u16 bit, int *res)
{
struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@
goto bail;
}
- if (alloc_di->i_suballoc_loc)
- bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc);
- else
- bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
+ bg_blkno = group_blkno ? group_blkno :
+ ocfs2_which_suballoc_group(blkno, bit);
status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
&group_bh);
if (status < 0) {
@@ -2680,6 +2848,7 @@
int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
{
int status;
+ u64 group_blkno = 0;
u16 suballoc_bit = 0, suballoc_slot = 0;
struct inode *inode_alloc_inode;
struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@
mlog_entry("blkno: %llu", (unsigned long long)blkno);
status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
- &suballoc_bit);
+ &group_blkno, &suballoc_bit);
if (status < 0) {
mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
goto bail;
@@ -2715,7 +2884,7 @@
}
status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
- blkno, suballoc_bit, res);
+ group_blkno, blkno, suballoc_bit, res);
if (status < 0)
mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index a017dd3..b8afabf 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -56,6 +56,9 @@
u64 ac_max_block; /* Highest block number to allocate. 0 is
is the same as ~0 - unlimited */
+ int ac_find_loc_only; /* hack for reflink operation ordering */
+ struct ocfs2_suballoc_result *ac_find_loc_priv; /* result of the find-only search */
+
struct ocfs2_alloc_reservation *ac_resv;
};
@@ -197,4 +200,22 @@
struct ocfs2_alloc_context **meta_ac);
int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
+
+
+
+/*
+ * The following two interfaces are for ocfs2_create_inode_in_orphan().
+ */
+int ocfs2_find_new_inode_loc(struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *ac,
+ u64 *fe_blkno);
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_alloc_context *ac,
+ u64 *suballoc_loc,
+ u16 *suballoc_bit,
+ u64 di_blkno);
+
#endif /* _CHAINALLOC_H_ */
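
The comment above ties these declarations to ocfs2_create_inode_in_orphan(): the inode's block number is determined before any dirent is built, so the orphan name can be derived from the real location. Below is a condensed ordering sketch using only calls visible in this patch; it is illustration rather than buildable code, since the __ocfs2_* helpers are static to namei.c, error handling and quota are omitted, and in the real caller the journal handle is only started between steps 2 and 3.

static int example_orphan_create_order(struct inode *dir,
                                       struct inode *inode,
                                       struct buffer_head *parent_di_bh,
                                       struct inode *orphan_dir,
                                       struct buffer_head *orphan_dir_bh,
                                       char *orphan_name,
                                       struct ocfs2_dir_lookup_result *orphan_insert,
                                       handle_t *handle)
{
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        struct ocfs2_alloc_context *inode_ac = NULL;
        struct buffer_head *new_di_bh = NULL;
        u64 di_blkno, suballoc_loc;
        u16 suballoc_bit;

        /* 1. Reserve an inode and learn where it will live. */
        ocfs2_reserve_new_inode(osb, &inode_ac);
        ocfs2_find_new_inode_loc(dir, parent_di_bh, inode_ac, &di_blkno);

        /* 2. Build the orphan dirent name from the known block number. */
        __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
                                   di_blkno, orphan_name, orphan_insert);

        /* 3. Inside the transaction, claim exactly that location... */
        ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
                                     &suballoc_loc, &suballoc_bit, di_blkno);

        /* 4. ...and initialize the new inode there. */
        return __ocfs2_mknod_locked(dir, inode, 0, &new_di_bh, parent_di_bh,
                                    handle, inode_ac, di_blkno,
                                    suballoc_loc, suballoc_bit);
}
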
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d03469f..06fa5e7 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1286,13 +1286,11 @@
xis.inode_bh = xbs.inode_bh = di_bh;
di = (struct ocfs2_dinode *)di_bh->b_data;
- down_read(&oi->ip_xattr_sem);
ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
buffer_size, &xis);
if (ret == -ENODATA && di->i_xattr_loc)
ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
buffer_size, &xbs);
- up_read(&oi->ip_xattr_sem);
return ret;
}
@@ -1316,8 +1314,10 @@
mlog_errno(ret);
return ret;
}
+ down_read(&OCFS2_I(inode)->ip_xattr_sem);
ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
name, buffer, buffer_size);
+ up_read(&OCFS2_I(inode)->ip_xattr_sem);
ocfs2_inode_unlock(inode, 0);
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 180cf5a..3b8b4566 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -146,7 +146,7 @@
u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 439fc1f..1dbca4e8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -224,7 +224,8 @@
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
if (vma->vm_flags & VM_GROWSDOWN)
- start += PAGE_SIZE;
+ if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
+ start += PAGE_SIZE;
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
start,
@@ -362,13 +363,13 @@
mss->referenced += PAGE_SIZE;
mapcount = page_mapcount(page);
if (mapcount >= 2) {
- if (pte_dirty(ptent))
+ if (pte_dirty(ptent) || PageDirty(page))
mss->shared_dirty += PAGE_SIZE;
else
mss->shared_clean += PAGE_SIZE;
mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
} else {
- if (pte_dirty(ptent))
+ if (pte_dirty(ptent) || PageDirty(page))
mss->private_dirty += PAGE_SIZE;
else
mss->private_clean += PAGE_SIZE;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 91c817f..2367fb3 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -163,7 +163,7 @@
static const struct file_operations proc_vmcore_operations = {
.read = read_vmcore,
- .llseek = generic_file_llseek,
+ .llseek = default_llseek,
};
static struct vmcore* __init get_new_element(void)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d72cf2b..286e36e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1932,7 +1932,8 @@
if (!xfs_buf_zone)
goto out;
- xfslogd_workqueue = create_workqueue("xfslogd");
+ xfslogd_workqueue = alloc_workqueue("xfslogd",
+ WQ_RESCUER | WQ_HIGHPRI, 1);
if (!xfslogd_workqueue)
goto out_free_buf_zone;
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 4fec427..3b9e626 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -785,6 +785,8 @@
{
struct fsxattr fa;
+ memset(&fa, 0, sizeof(struct fsxattr));
+
xfs_ilock(ip, XFS_ILOCK_SHARED);
fa.fsx_xflags = xfs_ip2xflags(ip);
fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c7376bf..8ca18e2 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -16,15 +16,27 @@
* While the GPIO programming interface defines valid GPIO numbers
* to be in the range 0..MAX_INT, this library restricts them to the
* smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
*/
#ifndef ARCH_NR_GPIOS
#define ARCH_NR_GPIOS 256
#endif
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
static inline int gpio_is_valid(int number)
{
- /* only some non-negative numbers are valid */
return ((unsigned)number) < ARCH_NR_GPIOS;
}
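
gpio_is_valid() pairs naturally with the convention of using a negative number in platform data to mean "no such GPIO". A hypothetical board-support sketch of that pattern:

#include <linux/gpio.h>

/*
 * Hypothetical hookup: a negative value in platform data means "no
 * card-detect line on this board", and gpio_is_valid() filters it out
 * before gpio_request() is ever attempted.
 */
static int example_request_cd(int gpio_cd)
{
        if (!gpio_is_valid(gpio_cd))
                return 0;               /* board has no CD GPIO */

        return gpio_request(gpio_cd, "card detect");
}
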
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c9f3cc5..3e5a51a 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -386,7 +386,15 @@
void (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
- enum drm_connector_status (*detect)(struct drm_connector *connector);
+
+ /* Check to see if anything is attached to the connector.
+ * @force is set to false whilst polling, true when checking the
+ * connector due to user request. @force can be used by the driver
+ * to avoid expensive, destructive operations during automated
+ * probing.
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
int (*set_property)(struct drm_connector *connector, struct drm_property *property,
uint64_t val);
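
Drivers now receive @force and can skip costly work while the periodic poller runs. A hedged sketch of what a detect() callback might look like under the new signature; a real driver would read its hotplug/status hardware, and the constant results below are placeholders:

#include <drm/drm_crtc.h>

static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
        /*
         * Polling path (force == false): report only what can be
         * learned cheaply and non-destructively.
         */
        if (!force)
                return connector_status_unknown;

        /*
         * Explicit, user-requested probe: an expensive or destructive
         * load-detect cycle is acceptable now.
         */
        return connector_status_connected;      /* placeholder result */
}
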
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index b0c1740..c6454cc 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -20,6 +20,7 @@
#include <linux/resource.h>
#define AMBA_NR_IRQS 2
+#define AMBA_CID 0xb105f00d
struct clk;
@@ -70,9 +71,15 @@
#define amba_pclk_disable(d) \
do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
-#define amba_config(d) (((d)->periphid >> 24) & 0xff)
-#define amba_rev(d) (((d)->periphid >> 20) & 0x0f)
-#define amba_manf(d) (((d)->periphid >> 12) & 0xff)
-#define amba_part(d) ((d)->periphid & 0xfff)
+/* Some drivers don't use the struct amba_device */
+#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
+#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
+#define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff)
+#define AMBA_PART_BITS(a) ((a) & 0xfff)
+
+#define amba_config(d) AMBA_CONFIG_BITS((d)->periphid)
+#define amba_rev(d) AMBA_REV_BITS((d)->periphid)
+#define amba_manf(d) AMBA_MANF_BITS((d)->periphid)
+#define amba_part(d) AMBA_PART_BITS((d)->periphid)
#endif
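
Because the field layout is fixed by the primecell ID register format, the raw-value macros can be exercised outside the kernel as well. A small standalone sketch; the periphid value is made up:

#include <stdio.h>

/* Field extraction mirrors the AMBA_*_BITS() macros added above. */
#define AMBA_CONFIG_BITS(a)     (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a)        (((a) >> 20) & 0x0f)
#define AMBA_MANF_BITS(a)       (((a) >> 12) & 0xff)
#define AMBA_PART_BITS(a)       ((a) & 0xfff)

int main(void)
{
        unsigned int periphid = 0x00041011;     /* made-up PL011-style ID */

        printf("part 0x%03x manf 0x%02x rev %u config 0x%02x\n",
               AMBA_PART_BITS(periphid), AMBA_MANF_BITS(periphid),
               AMBA_REV_BITS(periphid), AMBA_CONFIG_BITS(periphid));
        return 0;
}
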
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index ca84ce7..f4ee9ac 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -24,6 +24,7 @@
* whether a card is present in the MMC slot or not
* @gpio_wp: read this GPIO pin to see if the card is write protected
* @gpio_cd: read this GPIO pin to detect card insertion
+ * @cd_invert: true if the gpio_cd pin value is active low
* @capabilities: the capabilities of the block as implemented in
* this platform, signify anything MMC_CAP_* from mmc/host.h
*/
@@ -35,6 +36,7 @@
unsigned int (*status)(struct device *);
int gpio_wp;
int gpio_cd;
+ bool cd_invert;
unsigned long capabilities;
};
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index e1b634b..6021588 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -32,7 +32,9 @@
#define UART01x_RSR 0x04 /* Receive status register (Read). */
#define UART01x_ECR 0x04 /* Error clear register (Write). */
#define UART010_LCRH 0x08 /* Line control register, high byte. */
+#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */
#define UART010_LCRM 0x0C /* Line control register, middle byte. */
+#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */
#define UART010_LCRL 0x10 /* Line control register, low byte. */
#define UART010_CR 0x14 /* Control register. */
#define UART01x_FR 0x18 /* Flag register (Read only). */
@@ -51,6 +53,15 @@
#define UART011_MIS 0x40 /* Masked interrupt status. */
#define UART011_ICR 0x44 /* Interrupt clear register. */
#define UART011_DMACR 0x48 /* DMA control register. */
+#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */
+#define ST_UART011_XON1 0x54 /* XON1 register. */
+#define ST_UART011_XON2 0x58 /* XON2 register. */
+#define ST_UART011_XOFF1 0x5C /* XOFF1 register. */
+#define ST_UART011_XOFF2 0x60 /* XOFF2 register. */
+#define ST_UART011_ITCR 0x80 /* Integration test control register. */
+#define ST_UART011_ITIP 0x84 /* Integration test input register. */
+#define ST_UART011_ABCR 0x100 /* Autobaud control register. */
+#define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */
#define UART011_DR_OE (1 << 11)
#define UART011_DR_BE (1 << 10)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e..0c99102 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -578,7 +578,12 @@
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+ return cgroup_attach_task_all(current, tsk);
+}
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@
}
/* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+ struct task_struct *t)
+{
+ return 0;
+}
static inline int cgroup_attach_task_current_cg(struct task_struct *t)
{
return 0;
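
The new cgroup_attach_task_all(from, to) generalizes the old current-cgroup attach, with the existing helper reduced to a one-line wrapper. A hedged sketch of the kind of caller this enables; the helper name is hypothetical:

#include <linux/cgroup.h>
#include <linux/sched.h>

/*
 * Hypothetical use: move a freshly created worker thread into all of
 * the cgroups of the task that owns it, which is not necessarily
 * 'current' at this point.
 */
static int example_adopt_worker(struct task_struct *owner,
                                struct task_struct *worker)
{
        return cgroup_attach_task_all(owner, worker);
}
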
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 9ddc878..5778b55 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -360,5 +360,8 @@
const struct compat_iovec __user *uvector, unsigned long nr_segs,
unsigned long fast_segs, struct iovec *fast_pointer,
struct iovec **ret_pointer);
+
+extern void __user *compat_alloc_user_space(unsigned long len);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ce29b81..ba8319a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -102,6 +102,9 @@
return DMA_BIT_MASK(32);
}
+#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (!dma_supported(dev, mask))
@@ -109,6 +112,7 @@
dev->coherent_dma_mask = mask;
return 0;
}
+#endif
extern u64 dma_get_required_mask(struct device *dev);
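
With the ARCH_HAS_DMA_SET_COHERENT_MASK hook, drivers keep calling the same function; only the implementation may come from the architecture. A minimal, hypothetical probe-time sketch:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical helper: request 32-bit coherent DMA addressing and
 * report failure.  Whether the generic inline or an architecture
 * override runs is invisible to the driver.
 */
static int example_setup_dma(struct device *dev)
{
        int ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));

        if (ret)
                dev_warn(dev, "no suitable coherent DMA mask\n");

        return ret;
}
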
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2c958f4..926b503 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -136,6 +136,7 @@
extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 76041b6..63d069b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1093,6 +1093,10 @@
#include <linux/fcntl.h>
+/* temporary stubs for BKL removal */
+#define lock_flocks() lock_kernel()
+#define unlock_flocks() unlock_kernel()
+
extern void send_sigio(struct fown_struct *fown, int fd, int band);
#ifdef CONFIG_FILE_LOCKING
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 03f616b..e41f7dd 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
struct device;
+struct gpio_chip;
/*
* Some platforms don't support the GPIO programming interface.
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
index ee3049c..52baa79 100644
--- a/include/linux/i2c/sx150x.h
+++ b/include/linux/i2c/sx150x.h
@@ -63,6 +63,9 @@
* IRQ lines will appear. Similarly to gpio_base, the expander
* will create a block of irqs beginning at this number.
* This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ * reset of the chip at the beginning of the probe
+ * in order to place it in a known state.
*/
struct sx150x_platform_data {
unsigned gpio_base;
@@ -73,6 +76,7 @@
u16 io_polarity;
int irq_summary;
unsigned irq_base;
+ bool reset_during_probe;
};
#endif /* __LINUX_I2C_SX150X_H */
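
Board code opts into the probe-time reset through platform data. A hypothetical initializer using only fields visible in this header; all values are placeholders:

#include <linux/i2c/sx150x.h>

/*
 * Hypothetical board data: expander GPIOs start at 200, the summary IRQ
 * and IRQ base are placeholder numbers, and the chip is fully reset at
 * probe time so it starts from a known state.
 */
static struct sx150x_platform_data example_sx150x_pdata = {
        .gpio_base              = 200,
        .irq_summary            = 17,
        .irq_base               = 300,
        .reset_during_probe     = true,
};
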
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0a6b3d5..7fb5927 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -79,7 +79,7 @@
}
/* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
@@ -94,12 +94,12 @@
}
static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
{
iounmap_atomic(vaddr, slot);
}
-static inline void *
+static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
resource_size_t phys_addr;
@@ -111,7 +111,7 @@
}
static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
{
iounmap(vaddr);
}
@@ -125,38 +125,38 @@
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
- return (struct io_mapping *) ioremap_wc(base, size);
+ return (struct io_mapping __force *) ioremap_wc(base, size);
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
- iounmap(mapping);
+ iounmap((void __force __iomem *) mapping);
}
/* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
{
- return ((char *) mapping) + offset;
+ return ((char __force __iomem *) mapping) + offset;
}
static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
{
}
/* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
- return ((char *) mapping) + offset;
+ return ((char __force __iomem *) mapping) + offset;
}
static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
{
}
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 4aa95f2..62dbee5 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -214,7 +214,7 @@
*/
#define kfifo_reset(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
__tmp->kfifo.in = __tmp->kfifo.out = 0; \
})
@@ -228,7 +228,7 @@
*/
#define kfifo_reset_out(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
__tmp->kfifo.out = __tmp->kfifo.in; \
})
@@ -238,7 +238,7 @@
*/
#define kfifo_len(fifo) \
({ \
- typeof(fifo + 1) __tmpl = (fifo); \
+ typeof((fifo) + 1) __tmpl = (fifo); \
__tmpl->kfifo.in - __tmpl->kfifo.out; \
})
@@ -248,7 +248,7 @@
*/
#define kfifo_is_empty(fifo) \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
__tmpq->kfifo.in == __tmpq->kfifo.out; \
})
@@ -258,7 +258,7 @@
*/
#define kfifo_is_full(fifo) \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
})
@@ -269,7 +269,7 @@
#define kfifo_avail(fifo) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
const size_t __recsize = sizeof(*__tmpq->rectype); \
unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
(__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@
*/
#define kfifo_skip(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__recsize) \
@@ -302,7 +302,7 @@
#define kfifo_peek_len(fifo) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
(!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@
#define kfifo_alloc(fifo, size, gfp_mask) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
__is_kfifo_ptr(__tmp) ? \
__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@
*/
#define kfifo_free(fifo) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__is_kfifo_ptr(__tmp)) \
__kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@
*/
#define kfifo_init(fifo, buffer, size) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
__is_kfifo_ptr(__tmp) ? \
__kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@
*/
#define kfifo_put(fifo, val) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@
#define kfifo_get(fifo, val) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@
#define kfifo_peek(fifo, val) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@
*/
#define kfifo_in(fifo, buf, n) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@
#define kfifo_out(fifo, buf, n) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@
#define kfifo_from_user(fifo, from, len, copied) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const void __user *__from = (from); \
unsigned int __len = (len); \
unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@
#define kfifo_to_user(fifo, to, len, copied) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
unsigned int __len = (len); \
unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@
*/
#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
int __nents = (nents); \
unsigned int __len = (len); \
@@ -690,7 +690,7 @@
*/
#define kfifo_dma_in_finish(fifo, len) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
unsigned int __len = (len); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@
*/
#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
int __nents = (nents); \
unsigned int __len = (len); \
@@ -741,7 +741,7 @@
*/
#define kfifo_dma_out_finish(fifo, len) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
unsigned int __len = (len); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@
#define kfifo_out_peek(fifo, buf, n) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 74d691e..3319a69 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,6 +16,9 @@
struct stable_node;
struct mem_cgroup;
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_does_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = page_anon_vma(page);
- if (!anon_vma ||
- (anon_vma->root == vma->anon_vma->root &&
- page->index == linear_page_index(vma, address)))
- return page;
-
- return ksm_does_need_to_copy(page, vma, address);
+ return anon_vma &&
+ (anon_vma->root != vma->anon_vma->root ||
+ page->index != linear_page_index(vma, address));
}
int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- return page;
+ return 0;
}
static inline int page_referenced_ksm(struct page *page,
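
The split keeps the fast path inline: ksm_might_need_to_copy() is now a cheap predicate, and the out-of-line ksm_does_need_to_copy() is reached only when a copy is really required. A hedged sketch of the intended caller pattern (hypothetical helper name, CONFIG_KSM build assumed, locking omitted):

#include <linux/ksm.h>
#include <linux/mm.h>

/*
 * Hypothetical swap-in helper: run the inline predicate first, copy the
 * KSM page only when it cannot be reused for this vma/address.  The
 * result may be NULL if the copy could not be made.
 */
static struct page *example_maybe_copy_ksm_page(struct page *page,
                                                struct vm_area_struct *vma,
                                                unsigned long address)
{
        if (ksm_might_need_to_copy(page, vma, address))
                page = ksm_does_need_to_copy(page, vma, address);

        return page;
}
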
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index b288cb7..f549056 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -150,7 +150,7 @@
int i; \
preempt_disable(); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_possible_cpu(i) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
@@ -161,7 +161,7 @@
void name##_global_unlock(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_possible_cpu(i) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f010f18..45fb296 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -335,6 +335,7 @@
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
ATA_EHI_QUIET = (1 << 3), /* be quiet */
+ ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */
@@ -723,6 +724,7 @@
struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
u8 ctl; /* cache of ATA control register */
u8 last_ctl; /* Cache last written value */
+ struct ata_link* sff_pio_task_link; /* link currently used */
struct delayed_work sff_pio_task;
#ifdef CONFIG_ATA_BMDMA
struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
@@ -1594,7 +1596,7 @@
extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h
index e47f770..eff3094 100644
--- a/include/linux/mfd/tc35892.h
+++ b/include/linux/mfd/tc35892.h
@@ -111,9 +111,13 @@
* struct tc35892_gpio_platform_data - TC35892 GPIO platform data
* @gpio_base: first gpio number assigned to TC35892. A maximum of
* %TC35892_NR_GPIOS GPIOs will be allocated.
+ * @setup: callback for board-specific initialization
+ * @remove: callback for board-specific teardown
*/
struct tc35892_gpio_platform_data {
int gpio_base;
+ void (*setup)(struct tc35892 *tc35892, unsigned gpio_base);
+ void (*remove)(struct tc35892 *tc35892, unsigned gpio_base);
};
/**
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e6b1210..74949fb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -864,6 +864,12 @@
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 329a8fa..245cdac 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -38,6 +38,8 @@
* [8:0] Byte/block count
*/
+#define R4_MEMORY_PRESENT (1 << 27)
+
/*
SDIO status in R5
Type
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6e6e626..3984c4e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -284,6 +284,13 @@
unsigned long watermark[NR_WMARK];
/*
+ * When free pages are below this point, additional steps are taken
+ * when reading the number of free pages to avoid per-cpu counter
+ * drift allowing watermarks to be breached
+ */
+ unsigned long percpu_drift_mark;
+
+ /*
* We don't know if the memory that we're going to allocate will be freeable
* or/and it will be released eventually, so to avoid totally wasting several
* GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -441,6 +448,12 @@
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4..f363bc8 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -78,6 +78,14 @@
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
# define mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
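
A minimal usage sketch of the pattern documented above; the structure and function names are illustrative:

#include <linux/mutex.h>

struct example_dev {
        struct mutex lock;      /* protects 'count' */
        int count;
};

static void example_dev_setup(struct example_dev *ed)
{
        mutex_init(&ed->lock);  /* never call this on a locked mutex */
        ed->count = 0;
}

static void example_dev_bump(struct example_dev *ed)
{
        mutex_lock(&ed->lock);
        ed->count++;
        mutex_unlock(&ed->lock);
}
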
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 59d0669..1235669 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -27,8 +27,6 @@
#define MAX_LINKS 32
-struct net;
-
struct sockaddr_nl {
sa_family_t nl_family; /* AF_NETLINK */
unsigned short nl_pad; /* zero */
@@ -151,6 +149,8 @@
#include <linux/capability.h>
#include <linux/skbuff.h>
+struct net;
+
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 791d510..50d8009 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -63,20 +63,20 @@
unsigned long flags;
bool ret = false;
- rcu_read_lock_bh();
+ local_irq_save(flags);
npinfo = rcu_dereference_bh(skb->dev->npinfo);
if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
goto out;
- spin_lock_irqsave(&npinfo->rx_lock, flags);
+ spin_lock(&npinfo->rx_lock);
/* check rx_flags again with the lock held */
if (npinfo->rx_flags && __netpoll_rx(skb))
ret = true;
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ spin_unlock(&npinfo->rx_lock);
out:
- rcu_read_unlock_bh();
+ local_irq_restore(flags);
return ret;
}
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 10d3330..570fdde 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -393,6 +393,9 @@
#define PCI_DEVICE_ID_VLSI_82C147 0x0105
#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
+
#define PCI_VENDOR_ID_ADL 0x1005
#define PCI_DEVICE_ID_ADL_2301 0x2301
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d50ba85..d1a9193 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -274,8 +274,14 @@
int ret;
ret = dquot_alloc_space_nodirty(inode, nr);
- if (!ret)
- mark_inode_dirty_sync(inode);
+ if (!ret) {
+ /*
+ * Mark inode fully dirty. Since we are allocating blocks, inode
+ * would become fully dirty soon anyway and it reportedly
+ * reduces inode_lock contention.
+ */
+ mark_inode_dirty(inode);
+ }
return ret;
}
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 7415839..5310d27 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -26,6 +26,9 @@
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
+#define DEFINE_SEMAPHORE(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
#define DECLARE_MUTEX(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
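
DEFINE_SEMAPHORE() gives counting-semaphore users a name that is not tied to the deprecated DECLARE_MUTEX(). A minimal sketch with illustrative names:

#include <linux/errno.h>
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(example_sem);   /* initialized with a count of 1 */

/* Illustrative critical section guarded by the statically defined semaphore. */
static int example_critical_section(void)
{
        if (down_interruptible(&example_sem))
                return -EINTR;

        /* ... exclusive work ... */

        up(&example_sem);
        return 0;
}
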
diff --git a/include/linux/socket.h b/include/linux/socket.h
index a2fada9..a8f56e1 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -322,7 +322,7 @@
int offset,
unsigned int len, __wsum *csump);
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
index cc813f9..c91302f 100644
--- a/include/linux/spi/dw_spi.h
+++ b/include/linux/spi/dw_spi.h
@@ -14,7 +14,9 @@
#define SPI_MODE_OFFSET 6
#define SPI_SCPH_OFFSET 6
#define SPI_SCOL_OFFSET 7
+
#define SPI_TMOD_OFFSET 8
+#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR 0x0 /* xmit & recv */
#define SPI_TMOD_TO 0x1 /* xmit only */
#define SPI_TMOD_RO 0x2 /* recv only */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 569dc72..85f38a63 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -30,7 +30,7 @@
* The high-level client handle
*/
struct rpc_clnt {
- struct kref cl_kref; /* Number of references */
+ atomic_t cl_count; /* Number of references */
struct list_head cl_clients; /* Global list of clients */
struct list_head cl_tasks; /* List of tasks */
spinlock_t cl_lock; /* spinlock */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2fee51a..7cdd633 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -19,6 +19,7 @@
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
+#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */
static inline int current_is_kswapd(void)
{
@@ -142,7 +143,7 @@
enum {
SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
- SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
+ SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
@@ -315,6 +316,7 @@
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
/* linux/mm/thrash.c */
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(struct mm_struct *);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 7f43ccd..eaaea37 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -170,6 +170,28 @@
return x;
}
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ enum zone_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);
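
A standalone userspace analogue of what zone_page_state_snapshot() computes: the folded-in counter plus the not-yet-folded per-CPU deltas, clamped at zero because the racy sum can momentarily go negative. All numbers below are made up:

#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
        long global_count = 100;                         /* folded-in value */
        long per_cpu_diff[NR_CPUS] = { 3, -2, 5, -110 }; /* pending deltas */
        long x = global_count;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                x += per_cpu_diff[cpu];

        if (x < 0)      /* racy sum: clamp, as the kernel helper does */
                x = 0;

        printf("snapshot = %ld\n", x);
        return 0;
}
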
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f11100f..25e02c9 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -235,6 +235,10 @@
#define work_clear_pending(work) \
clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
+/*
+ * Workqueue flags and constants. For details, please refer to
+ * Documentation/workqueue.txt.
+ */
enum {
WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 45375b4..4d40c4d 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -121,6 +121,7 @@
* IPv6 Address Label subsystem (addrlabel.c)
*/
extern int ipv6_addr_label_init(void);
+extern void ipv6_addr_label_cleanup(void);
extern void ipv6_addr_label_rtnl_register(void);
extern u32 ipv6_addr_label(struct net *net,
const struct in6_addr *addr,
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 726cc35..ef6c24a 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -27,11 +27,17 @@
#ifdef CONFIG_NET_CLS_CGROUP
static inline u32 task_cls_classid(struct task_struct *p)
{
+ int classid;
+
if (in_interrupt())
return 0;
- return container_of(task_subsys_state(p, net_cls_subsys_id),
- struct cgroup_cls_state, css)->classid;
+ rcu_read_lock();
+ classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+ struct cgroup_cls_state, css)->classid;
+ rcu_read_unlock();
+
+ return classid;
}
#else
extern int net_cls_subsys_id;
diff --git a/include/net/dst.h b/include/net/dst.h
index 81d1413..0238650 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -242,6 +242,7 @@
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
skb->rxhash = 0;
+ skb_set_queue_mapping(skb, 0);
skb_dst_drop(skb);
nf_reset(skb);
}
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a4747a0..f976885 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -955,6 +955,9 @@
return csum_partial(diff, sizeof(diff), oldsum);
}
+extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+ int outin);
+
#endif /* __KERNEL__ */
#endif /* _NET_IP_VS_H */
diff --git a/include/net/route.h b/include/net/route.h
index bd732d6..7e5e73b 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -199,6 +199,8 @@
fl.fl_ip_sport = sport;
fl.fl_ip_dport = dport;
fl.proto = protocol;
+ if (inet_sk(sk)->transparent)
+ fl.flags |= FLOWI_FLAG_ANYSRC;
ip_rt_put(*rp);
*rp = NULL;
security_sk_classify_flow(sk, &fl);
diff --git a/include/net/sock.h b/include/net/sock.h
index ac53bfb..adab9dc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -752,6 +752,7 @@
/* Keeping track of sk's, looking them up, and port selection methods. */
void (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
+ void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
/* Keeping track of sockets in use */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eaa9582..3e4b33e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -475,8 +475,22 @@
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
- if (tp->max_window && pktsize > (tp->max_window >> 1))
- return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+ int cutoff;
+
+ /* When peer uses tiny windows, there is no use in packetizing
+ * to sub-MSS pieces for the sake of SWS or making sure there
+ * are enough packets in the pipe for fast recovery.
+ *
+ * On the other hand, for extremely large MSS devices, handling
+ * smaller than MSS windows in this way does make sense.
+ */
+ if (tp->max_window >= 512)
+ cutoff = (tp->max_window >> 1);
+ else
+ cutoff = tp->max_window;
+
+ if (cutoff && pktsize > cutoff)
+ return max_t(int, cutoff, 68U - tp->tcp_header_len);
else
return pktsize;
}
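The reworked tcp_bound_to_half_wnd() above halves large peer windows but keeps windows below 512 bytes whole, so tiny windows are not split into sub-MSS pieces. A standalone sketch of that cutoff rule, with an invented demo_bound() helper:

#include <stdio.h>

static int demo_bound(int max_window, int header_len, int pktsize)
{
	int cutoff;

	if (max_window >= 512)
		cutoff = max_window >> 1;
	else
		cutoff = max_window;

	if (cutoff && pktsize > cutoff)
		return cutoff > 68 - header_len ? cutoff : 68 - header_len;
	return pktsize;
}

int main(void)
{
	/* Large window: the packet is bounded to half the window. */
	printf("%d\n", demo_bound(65535, 20, 64000));   /* 32767 */
	/* Tiny window: the whole window is the cutoff, no sub-MSS splitting. */
	printf("%d\n", demo_bound(256, 20, 1460));      /* 256 */
	return 0;
}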
diff --git a/include/net/udp.h b/include/net/udp.h
index 7abdf30..a184d34 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -151,6 +151,7 @@
}
extern void udp_lib_unhash(struct sock *sk);
+extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
static inline void udp_lib_close(struct sock *sk, long timeout)
{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fc8f36d..4f53532 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -298,8 +298,8 @@
const struct xfrm_type *type_map[IPPROTO_MAX];
struct xfrm_mode *mode_map[XFRM_MODE_MAX];
int (*init_flags)(struct xfrm_state *x);
- void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
+ void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
+ void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
xfrm_address_t *daddr, xfrm_address_t *saddr);
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c..c9483d8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,19 +1791,20 @@
}
/**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
* @tsk: the task to be attached
*/
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
struct cgroupfs_root *root;
- struct cgroup *cur_cg;
int retval = 0;
cgroup_lock();
for_each_active_root(root) {
- cur_cg = task_cgroup_from_root(current, root);
- retval = cgroup_attach_task(cur_cg, tsk);
+ struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+ retval = cgroup_attach_task(from_cg, tsk);
if (retval)
break;
}
@@ -1811,7 +1812,7 @@
return retval;
}
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
/*
* Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efc..c9e2ec0 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1126,3 +1126,24 @@
return 0;
}
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+ void __user *ptr;
+
+ /* If len would occupy more than half of the entire compat space... */
+ if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+ return NULL;
+
+ ptr = arch_compat_alloc_user_space(len);
+
+ if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+ return NULL;
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
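The new compat_alloc_user_space() above first rejects any request larger than half of a 32-bit compat address space before asking the architecture for user memory. A small sketch of just that length check (plain arithmetic, no kernel APIs; demo_len_ok() is invented):

#include <stdint.h>
#include <stdio.h>

static int demo_len_ok(unsigned long len)
{
	return len <= (((uint32_t)~0u) >> 1);   /* 0x7fffffff */
}

int main(void)
{
	printf("%d %d\n", demo_len_ok(4096), demo_len_ok(0x80000000ul));
	/* prints "1 0": a page-sized request passes, a 2 GiB one does not */
	return 0;
}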
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3..20059ef 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@
int i, bpno;
kdb_bp_t *bp, *bp_check;
int diag;
- int free;
char *symname = NULL;
long offset = 0ul;
int nextarg;
@@ -305,7 +304,6 @@
/*
* Find an empty bp structure to allocate
*/
- free = KDB_MAXBPT;
for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
if (bp->bp_free)
break;
diff --git a/kernel/fork.c b/kernel/fork.c
index b7e9d60..c445f8c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -356,10 +356,10 @@
if (IS_ERR(pol))
goto fail_nomem_policy;
vma_set_policy(tmp, pol);
+ tmp->vm_mm = mm;
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~VM_LOCKED;
- tmp->vm_mm = mm;
tmp->vm_next = tmp->vm_prev = NULL;
file = tmp->vm_file;
if (file) {
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f8..f83972b 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -33,10 +33,11 @@
* @children: child nodes
* @all: list head for list of all nodes
* @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- * copy of the profiling data here to allow collecting coverage data
- * for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ * files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ * object files. Used only when gcov_persist=1.
* @dentry: main debugfs entry, either a directory or data file
* @links: associated symbolic links
* @name: data file basename
@@ -51,10 +52,11 @@
struct list_head children;
struct list_head all;
struct gcov_node *parent;
- struct gcov_info *info;
- struct gcov_info *ghost;
+ struct gcov_info **loaded_info;
+ struct gcov_info *unloaded_info;
struct dentry *dentry;
struct dentry **links;
+ int num_loaded;
char name[0];
};
@@ -136,16 +138,37 @@
};
/*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
*/
static struct gcov_info *get_node_info(struct gcov_node *node)
{
- if (node->info)
- return node->info;
+ if (node->num_loaded > 0)
+ return node->loaded_info[0];
- return node->ghost;
+ return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+ struct gcov_info *info;
+ int i = 0;
+
+ if (node->unloaded_info)
+ info = gcov_info_dup(node->unloaded_info);
+ else
+ info = gcov_info_dup(node->loaded_info[i++]);
+ if (!info)
+ return NULL;
+ for (; i < node->num_loaded; i++)
+ gcov_info_add(info, node->loaded_info[i]);
+
+ return info;
}
/*
@@ -163,9 +186,10 @@
mutex_lock(&node_lock);
/*
* Read from a profiling data copy to minimize reference tracking
- * complexity and concurrent access.
+ * complexity and concurrent access and to keep accumulating multiple
+ * profiling data sets associated with one node simple.
*/
- info = gcov_info_dup(get_node_info(node));
+ info = get_accumulated_info(node);
if (!info)
goto out_unlock;
iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@
return NULL;
}
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+ int i;
+
+ if (node->unloaded_info)
+ gcov_info_reset(node->unloaded_info);
+ for (i = 0; i < node->num_loaded; i++)
+ gcov_info_reset(node->loaded_info[i]);
+}
+
static void remove_node(struct gcov_node *node);
/*
* write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
*/
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -245,10 +282,10 @@
node = get_node_by_name(info->filename);
if (node) {
/* Reset counts or remove node for unloaded modules. */
- if (node->ghost)
+ if (node->num_loaded == 0)
remove_node(node);
else
- gcov_info_reset(node->info);
+ reset_node(node);
}
/* Reset counts for open file. */
gcov_info_reset(info);
@@ -378,7 +415,10 @@
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->all);
- node->info = info;
+ if (node->loaded_info) {
+ node->loaded_info[0] = info;
+ node->num_loaded = 1;
+ }
node->parent = parent;
if (name)
strcpy(node->name, name);
@@ -394,9 +434,13 @@
struct gcov_node *node;
node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
- if (!node) {
- pr_warning("out of memory\n");
- return NULL;
+ if (!node)
+ goto err_nomem;
+ if (info) {
+ node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+ GFP_KERNEL);
+ if (!node->loaded_info)
+ goto err_nomem;
}
init_node(node, info, name, parent);
/* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@
list_add(&node->all, &all_head);
return node;
+
+err_nomem:
+ kfree(node);
+ pr_warning("out of memory\n");
+ return NULL;
}
/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@
list_del(&node->all);
debugfs_remove(node->dentry);
remove_links(node);
- if (node->ghost)
- gcov_info_free(node->ghost);
+ kfree(node->loaded_info);
+ if (node->unloaded_info)
+ gcov_info_free(node->unloaded_info);
kfree(node);
}
@@ -477,7 +527,7 @@
/*
* write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
*/
static ssize_t reset_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -487,8 +537,8 @@
mutex_lock(&node_lock);
restart:
list_for_each_entry(node, &all_head, all) {
- if (node->info)
- gcov_info_reset(node->info);
+ if (node->num_loaded > 0)
+ reset_node(node);
else if (list_empty(&node->children)) {
remove_node(node);
/* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@
}
/*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
*/
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
{
- node->ghost = gcov_info_dup(node->info);
- if (!node->ghost) {
- pr_warning("could not save data for '%s' (out of memory)\n",
- node->info->filename);
- return -ENOMEM;
- }
- node->info = NULL;
+ struct gcov_info **loaded_info;
+ int num = node->num_loaded;
- return 0;
+ /*
+ * Prepare new array. This is done first to simplify cleanup in
+ * case the new data set is incompatible, the node only contains
+ * unloaded data sets and there's not enough memory for the array.
+ */
+ loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+ if (!loaded_info) {
+ pr_warning("could not add '%s' (out of memory)\n",
+ info->filename);
+ return;
+ }
+ memcpy(loaded_info, node->loaded_info,
+ num * sizeof(struct gcov_info *));
+ loaded_info[num] = info;
+ /* Check if the new data set is compatible. */
+ if (num == 0) {
+ /*
+ * A module was unloaded, modified and reloaded. The new
+ * data set replaces the copy of the last one.
+ */
+ if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+ pr_warning("discarding saved data for %s "
+ "(incompatible version)\n", info->filename);
+ gcov_info_free(node->unloaded_info);
+ node->unloaded_info = NULL;
+ }
+ } else {
+ /*
+ * Two different versions of the same object file are loaded.
+ * The initial one takes precedence.
+ */
+ if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+ pr_warning("could not add '%s' (incompatible "
+ "version)\n", info->filename);
+ kfree(loaded_info);
+ return;
+ }
+ }
+ /* Overwrite previous array. */
+ kfree(node->loaded_info);
+ node->loaded_info = loaded_info;
+ node->num_loaded = num + 1;
}
/*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Return the index of a profiling data set associated with a node.
*/
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
{
- if (gcov_info_is_compatible(node->ghost, info))
- gcov_info_add(info, node->ghost);
- else {
- pr_warning("discarding saved data for '%s' (version changed)\n",
- info->filename);
+ int i;
+
+ for (i = 0; i < node->num_loaded; i++) {
+ if (node->loaded_info[i] == info)
+ return i;
}
- gcov_info_free(node->ghost);
- node->ghost = NULL;
- node->info = info;
+ return -ENOENT;
+}
+
+/*
+ * Save the data of a profiling data set which is being unloaded.
+ */
+static void save_info(struct gcov_node *node, struct gcov_info *info)
+{
+ if (node->unloaded_info)
+ gcov_info_add(node->unloaded_info, info);
+ else {
+ node->unloaded_info = gcov_info_dup(info);
+ if (!node->unloaded_info) {
+ pr_warning("could not save data for '%s' "
+ "(out of memory)\n", info->filename);
+ }
+ }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+ int i;
+
+ i = get_info_index(node, info);
+ if (i < 0) {
+ pr_warning("could not remove '%s' (not found)\n",
+ info->filename);
+ return;
+ }
+ if (gcov_persist)
+ save_info(node, info);
+ /* Shrink array. */
+ node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+ node->num_loaded--;
+ if (node->num_loaded > 0)
+ return;
+ /* Last loaded data set was removed. */
+ kfree(node->loaded_info);
+ node->loaded_info = NULL;
+ node->num_loaded = 0;
+ if (!node->unloaded_info)
+ remove_node(node);
}
/*
@@ -609,30 +737,18 @@
node = get_node_by_name(info->filename);
switch (action) {
case GCOV_ADD:
- /* Add new node or revive ghost. */
- if (!node) {
+ if (node)
+ add_info(node, info);
+ else
add_node(info);
- break;
- }
- if (gcov_persist)
- revive_node(node, info);
- else {
- pr_warning("could not add '%s' (already exists)\n",
- info->filename);
- }
break;
case GCOV_REMOVE:
- /* Remove node or turn into ghost. */
- if (!node) {
+ if (node)
+ remove_info(node, info);
+ else {
pr_warning("could not remove '%s' (not found)\n",
info->filename);
- break;
}
- if (gcov_persist) {
- if (!ghost_node(node))
- break;
- }
- remove_node(node);
break;
}
mutex_unlock(&node_lock);
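The add_info() hunk above grows the per-node pointer array by allocating num + 1 slots first, copying the old entries, and only then swapping the arrays, so a failed allocation leaves the node untouched. A userspace sketch of that grow-then-swap step, with invented demo_* names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_node {
	void **loaded;
	int num_loaded;
};

static int demo_add(struct demo_node *node, void *info)
{
	int num = node->num_loaded;
	void **grown;

	grown = calloc(num + 1, sizeof(*grown));
	if (!grown)
		return -1;                       /* old array stays valid on failure */
	if (num)
		memcpy(grown, node->loaded, num * sizeof(*grown));
	grown[num] = info;

	free(node->loaded);                      /* swap in the new array */
	node->loaded = grown;
	node->num_loaded = num + 1;
	return 0;
}

int main(void)
{
	struct demo_node node = { NULL, 0 };
	int a, b;

	demo_add(&node, &a);
	demo_add(&node, &b);
	printf("%d\n", node.num_loaded);         /* prints 2 */
	free(node.loaded);
	return 0;
}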
diff --git a/kernel/groups.c b/kernel/groups.c
index 53b1916..253dc0f 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
- int cmp = grp - GROUP_AT(group_info, mid);
- if (cmp > 0)
+ if (grp > GROUP_AT(group_info, mid))
left = mid + 1;
- else if (cmp < 0)
+ else if (grp < GROUP_AT(group_info, mid))
right = mid;
else
return 1;
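The kernel/groups.c change above compares group IDs directly instead of subtracting them, since the subtraction can wrap for large unsigned values. A runnable sketch of the same comparison-based binary search, using an invented demo_groups_search():

#include <stdio.h>

static int demo_groups_search(const unsigned int *groups, unsigned int ngroups,
			      unsigned int grp)
{
	unsigned int left = 0, right = ngroups;

	while (left < right) {
		unsigned int mid = (left + right) / 2;

		if (grp > groups[mid])
			left = mid + 1;
		else if (grp < groups[mid])
			right = mid;
		else
			return 1;
	}
	return 0;
}

int main(void)
{
	unsigned int groups[] = { 4, 20, 24, 1000 };

	printf("%d %d\n", demo_groups_search(groups, 4, 24),
	       demo_groups_search(groups, 4, 25));   /* prints "1 0" */
	return 0;
}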
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce66917..1decafb 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1091,11 +1091,10 @@
*/
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
- struct hrtimer_clock_base *base;
unsigned long flags;
ktime_t rem;
- base = lock_hrtimer_base(timer, &flags);
+ lock_hrtimer_base(timer, &flags);
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index d71a987..c7c2aed 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -433,7 +433,8 @@
perf_overflow_handler_t triggered,
struct task_struct *tsk)
{
- return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+ return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
+ triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3..200407c 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@
# include <asm/mutex.h>
#endif
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
@@ -68,7 +59,7 @@
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
-/***
+/**
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
*
@@ -105,7 +96,7 @@
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
-/***
+/**
* mutex_unlock - release the mutex
* @lock: the mutex to be released
*
@@ -364,8 +355,8 @@
static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
* @lock: the mutex to be acquired
*
* Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@
return prev == 1;
}
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired
*
* Try to acquire the mutex atomically. Returns 1 if the mutex
* has been acquired successfully, and 0 on contention.
*
* NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
* about this when converting semaphore users to mutexes.
*
* This function must not be used in interrupt context. The
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d180..db5b560 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -402,11 +402,31 @@
}
}
+static inline int
+event_filter_match(struct perf_event *event)
+{
+ return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
static void
event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
+ u64 delta;
+ /*
+ * An event which could not be activated because of
+ * filter mismatch still needs to have its timings
+ * maintained, otherwise bogus information is returned
+ * via read() for time_enabled, time_running:
+ */
+ if (event->state == PERF_EVENT_STATE_INACTIVE
+ && !event_filter_match(event)) {
+ delta = ctx->time - event->tstamp_stopped;
+ event->tstamp_running += delta;
+ event->tstamp_stopped = ctx->time;
+ }
+
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
@@ -432,9 +452,7 @@
struct perf_event_context *ctx)
{
struct perf_event *event;
-
- if (group_event->state != PERF_EVENT_STATE_ACTIVE)
- return;
+ int state = group_event->state;
event_sched_out(group_event, cpuctx, ctx);
@@ -444,7 +462,7 @@
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
- if (group_event->attr.exclusive)
+ if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
@@ -5743,15 +5761,15 @@
{
unsigned int cpu = (long)hcpu;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
+ case CPU_DOWN_FAILED:
perf_event_init_cpu(cpu);
break;
+ case CPU_UP_CANCELED:
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
perf_event_exit_cpu(cpu);
break;
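The event_sched_out() hunk above advances time accounting even for events that never ran because their CPU filter did not match, so a later read() reports sane time_enabled/time_running values. A tiny sketch of that bookkeeping step, with an invented demo_event type:

#include <stdio.h>

struct demo_event {
	unsigned long long tstamp_running;
	unsigned long long tstamp_stopped;
};

/* On sched-out of an inactive (filtered) event, credit the elapsed context time. */
static void demo_sched_out_inactive(struct demo_event *e,
				    unsigned long long ctx_time)
{
	unsigned long long delta = ctx_time - e->tstamp_stopped;

	e->tstamp_running += delta;
	e->tstamp_stopped = ctx_time;
}

int main(void)
{
	struct demo_event e = { .tstamp_running = 0, .tstamp_stopped = 100 };

	demo_sched_out_inactive(&e, 250);
	printf("running=%llu stopped=%llu\n", e.tstamp_running, e.tstamp_stopped);
	/* prints "running=150 stopped=250" */
	return 0;
}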
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index b7e4c36..645e541 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -389,10 +389,12 @@
} else if (count == 11) { /* len('0x12345678/0') */
if (copy_from_user(ascii_value, buf, 11))
return -EFAULT;
+ if (strlen(ascii_value) != 10)
+ return -EINVAL;
x = sscanf(ascii_value, "%x", &value);
if (x != 1)
return -EINVAL;
- pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
+ pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
} else
return -EINVAL;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c779639..8dc31e0 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@
goto Close;
suspend_console();
- hibernation_freeze_swap();
saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
error = dpm_suspend_start(PMSG_FREEZE);
if (error)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb..d3f795f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@
buffer = NULL;
alloc_normal = 0;
alloc_highmem = 0;
- hibernation_thaw_swap();
}
/* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@
return nr_alloc;
}
-static unsigned long preallocate_image_memory(unsigned long nr_pages)
+static unsigned long preallocate_image_memory(unsigned long nr_pages,
+ unsigned long avail_normal)
{
- return preallocate_image_pages(nr_pages, GFP_IMAGE);
+ unsigned long alloc;
+
+ if (avail_normal <= alloc_normal)
+ return 0;
+
+ alloc = avail_normal - alloc_normal;
+ if (nr_pages < alloc)
+ alloc = nr_pages;
+
+ return preallocate_image_pages(alloc, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@
*/
static void free_unnecessary_pages(void)
{
- unsigned long save_highmem, to_free_normal, to_free_highmem;
+ unsigned long save, to_free_normal, to_free_highmem;
- to_free_normal = alloc_normal - count_data_pages();
- save_highmem = count_highmem_pages();
- if (alloc_highmem > save_highmem) {
- to_free_highmem = alloc_highmem - save_highmem;
+ save = count_data_pages();
+ if (alloc_normal >= save) {
+ to_free_normal = alloc_normal - save;
+ save = 0;
+ } else {
+ to_free_normal = 0;
+ save -= alloc_normal;
+ }
+ save += count_highmem_pages();
+ if (alloc_highmem >= save) {
+ to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
- to_free_normal -= save_highmem - alloc_highmem;
+ to_free_normal -= save - alloc_highmem;
}
memory_bm_position_reset(©_bm);
@@ -1259,7 +1275,7 @@
{
struct zone *zone;
unsigned long saveable, size, max_size, count, highmem, pages = 0;
- unsigned long alloc, save_highmem, pages_highmem;
+ unsigned long alloc, save_highmem, pages_highmem, avail_normal;
struct timeval start, stop;
int error;
@@ -1296,6 +1312,7 @@
else
count += zone_page_state(zone, NR_FREE_PAGES);
}
+ avail_normal = count;
count += highmem;
count -= totalreserve_pages;
@@ -1310,12 +1327,21 @@
*/
if (size >= saveable) {
pages = preallocate_image_highmem(save_highmem);
- pages += preallocate_image_memory(saveable - pages);
+ pages += preallocate_image_memory(saveable - pages, avail_normal);
goto out;
}
/* Estimate the minimum size of the image. */
pages = minimum_image_size(saveable);
+ /*
+ * To avoid excessive pressure on the normal zone, leave room in it to
+ * accommodate an image of the minimum size (unless it's already too
+ * small, in which case don't preallocate pages from it at all).
+ */
+ if (avail_normal > pages)
+ avail_normal -= pages;
+ else
+ avail_normal = 0;
if (size < pages)
size = min_t(unsigned long, pages, max_size);
@@ -1336,16 +1362,34 @@
*/
pages_highmem = preallocate_image_highmem(highmem / 2);
alloc = (count - max_size) - pages_highmem;
- pages = preallocate_image_memory(alloc);
- if (pages < alloc)
- goto err_out;
- size = max_size - size;
- alloc = size;
- size = preallocate_highmem_fraction(size, highmem, count);
- pages_highmem += size;
- alloc -= size;
- pages += preallocate_image_memory(alloc);
- pages += pages_highmem;
+ pages = preallocate_image_memory(alloc, avail_normal);
+ if (pages < alloc) {
+ /* We have exhausted non-highmem pages, try highmem. */
+ alloc -= pages;
+ pages += pages_highmem;
+ pages_highmem = preallocate_image_highmem(alloc);
+ if (pages_highmem < alloc)
+ goto err_out;
+ pages += pages_highmem;
+ /*
+ * size is the desired number of saveable pages to leave in
+ * memory, so try to preallocate (all memory - size) pages.
+ */
+ alloc = (count - pages) - size;
+ pages += preallocate_image_highmem(alloc);
+ } else {
+ /*
+ * There are approximately max_size saveable pages at this point
+ * and we want to reduce this number down to size.
+ */
+ alloc = max_size - size;
+ size = preallocate_highmem_fraction(alloc, highmem, count);
+ pages_highmem += size;
+ alloc -= size;
+ size = preallocate_image_memory(alloc, avail_normal);
+ pages_highmem += preallocate_image_highmem(alloc - size);
+ pages += pages_highmem + size;
+ }
/*
* We only need as many page frames for the image as there are saveable
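The preallocate_image_memory() change above caps each request at whatever is still available in the normal zone (avail_normal minus what has already been allocated there). A sketch of that clamp as plain arithmetic, with an invented demo_prealloc_clamp():

#include <stdio.h>

static unsigned long demo_prealloc_clamp(unsigned long nr_pages,
					 unsigned long avail_normal,
					 unsigned long alloc_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;                        /* nothing left in the normal zone */
	alloc = avail_normal - alloc_normal;
	return nr_pages < alloc ? nr_pages : alloc;
}

int main(void)
{
	printf("%lu\n", demo_prealloc_clamp(500, 1000, 800));   /* 200: capped */
	printf("%lu\n", demo_prealloc_clamp(100, 1000, 800));   /* 100: fits */
	printf("%lu\n", demo_prealloc_clamp(500, 800, 900));    /* 0: exhausted */
	return 0;
}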
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059e..e6a5bdf 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@
{
unsigned long offset;
- offset = swp_offset(get_swap_for_hibernation(swap));
+ offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
- swap_free_for_hibernation(swp_entry(swap, offset));
+ swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
}
@@ -163,7 +163,7 @@
ext = container_of(node, struct swsusp_extent, node);
rb_erase(node, &swsusp_extents);
for (offset = ext->start; offset <= ext->end; offset++)
- swap_free_for_hibernation(swp_entry(swap, offset));
+ swap_free(swp_entry(swap, offset));
kfree(ext);
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 09b574e..dc85ceb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1294,6 +1294,10 @@
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
+
+static void sched_avg_update(struct rq *rq)
+{
+}
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@
this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
}
+
+ sched_avg_update(this_rq);
}
static void update_cpu_load_active(struct rq *this_rq)
@@ -3507,9 +3513,9 @@
rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) {
- u64 temp;
+ u64 temp = rtime;
- temp = (u64)(rtime * utime);
+ temp *= utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
@@ -3540,9 +3546,9 @@
rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
if (total) {
- u64 temp;
+ u64 temp = rtime;
- temp = (u64)(rtime * cputime.utime);
+ temp *= cputime.utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
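The cputime scaling hunks in kernel/sched.c above widen rtime to 64 bits before multiplying, instead of multiplying two narrower values and widening the already-truncated product. A minimal demonstration of the difference, using illustrative numbers only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rtime = 3000000, utime = 2000000, total = 4000000;

	/* The 32-bit multiply wraps before the cast widens it. */
	uint64_t truncated = (uint64_t)(rtime * utime) / total;
	/* Widen first, then multiply, as the patched code does. */
	uint64_t correct = ((uint64_t)rtime * utime) / total;

	printf("truncated=%llu correct=%llu\n",
	       (unsigned long long)truncated, (unsigned long long)correct);
	/* prints "truncated=1056 correct=1500000" */
	return 0;
}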
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ab661eb..db3f674 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -54,13 +54,13 @@
* Minimal preemption granularity for CPU-bound tasks:
* (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 2000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
*/
-static unsigned int sched_nr_latency = 3;
+static unsigned int sched_nr_latency = 8;
/*
* After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int load_idx)
{
- struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+ struct sched_group *idlest = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -1348,7 +1348,6 @@
if (local_group) {
this_load = avg_load;
- this = group;
} else if (avg_load < min_load) {
min_load = avg_load;
idlest = group;
@@ -2268,8 +2267,6 @@
struct rq *rq = cpu_rq(cpu);
u64 total, available;
- sched_avg_update(rq);
-
total = sched_avg_period() + (rq->clock - rq->age_stamp);
available = total - rq->rt_avg;
@@ -3633,7 +3630,7 @@
if (time_before(now, nohz.next_balance))
return 0;
- if (!rq->nr_running)
+ if (rq->idle_at_tick)
return 0;
first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad444..7f5a0cd 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -931,6 +931,7 @@
pgid = pid;
if (pgid < 0)
return -EINVAL;
+ rcu_read_lock();
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
return err;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e..f88552c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@
{
sysctl_set_parent(NULL, root_table);
#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
- {
- int err;
- err = sysctl_check_table(current->nsproxy, root_table);
- }
+ sysctl_check_table(current->nsproxy, root_table);
#endif
return 0;
}
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 538501c..6329d06 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -121,7 +121,7 @@
config FUNCTION_TRACER
bool "Kernel Function Tracer"
depends on HAVE_FUNCTION_TRACER
- select FRAME_POINTER
+ select FRAME_POINTER if (!ARM_UNWIND)
select KALLSYMS
select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d88ce9..fa7ece6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -381,12 +381,19 @@
{
struct ftrace_profile *rec = v;
char str[KSYM_SYMBOL_LEN];
+ int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- static DEFINE_MUTEX(mutex);
static struct trace_seq s;
unsigned long long avg;
unsigned long long stddev;
#endif
+ mutex_lock(&ftrace_profile_lock);
+
+ /* we raced with function_profile_reset() */
+ if (unlikely(rec->counter == 0)) {
+ ret = -EBUSY;
+ goto out;
+ }
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -408,7 +415,6 @@
do_div(stddev, (rec->counter - 1) * 1000);
}
- mutex_lock(&mutex);
trace_seq_init(&s);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
@@ -416,11 +422,12 @@
trace_seq_puts(&s, " ");
trace_print_graph_duration(stddev, &s);
trace_print_seq(m, &s);
- mutex_unlock(&mutex);
#endif
seq_putc(m, '\n');
+out:
+ mutex_unlock(&ftrace_profile_lock);
- return 0;
+ return ret;
}
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1503,6 +1510,8 @@
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
+ /* reset in case of seek/pread */
+ iter->flags &= ~FTRACE_ITER_HASH;
return iter;
}
@@ -2409,7 +2418,7 @@
.open = ftrace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
- .llseek = ftrace_regex_lseek,
+ .llseek = no_llseek,
.release = ftrace_filter_release,
};
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 19cccc3..492197e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2985,13 +2985,11 @@
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
- struct ring_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
unsigned length;
cpu_buffer = iter->cpu_buffer;
- buffer = cpu_buffer->buffer;
/*
* Check if we are at the end of the buffer.
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e8..31cc4cb 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -91,6 +91,8 @@
tp_event->class && tp_event->class->reg &&
try_module_get(tp_event->mod)) {
ret = perf_trace_event_init(tp_event, p_event);
+ if (ret)
+ module_put(tp_event->mod);
break;
}
}
@@ -146,6 +148,7 @@
}
}
out:
+ module_put(tp_event->mod);
mutex_unlock(&event_mutex);
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8b27c98..544301d 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -514,8 +514,8 @@
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
struct pt_regs *regs);
-/* Check the name is good for event/group */
-static int check_event_name(const char *name)
+/* Check the name is good for event/group/fields */
+static int is_good_name(const char *name)
{
if (!isalpha(*name) && *name != '_')
return 0;
@@ -557,7 +557,7 @@
else
tp->rp.kp.pre_handler = kprobe_dispatcher;
- if (!event || !check_event_name(event)) {
+ if (!event || !is_good_name(event)) {
ret = -EINVAL;
goto error;
}
@@ -567,7 +567,7 @@
if (!tp->call.name)
goto error;
- if (!group || !check_event_name(group)) {
+ if (!group || !is_good_name(group)) {
ret = -EINVAL;
goto error;
}
@@ -883,7 +883,7 @@
int i, ret = 0;
int is_return = 0, is_delete = 0;
char *symbol = NULL, *event = NULL, *group = NULL;
- char *arg, *tmp;
+ char *arg;
unsigned long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
@@ -992,26 +992,36 @@
/* parse arguments */
ret = 0;
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+ /* Increment count for freeing args in error case */
+ tp->nr_args++;
+
/* Parse argument name */
arg = strchr(argv[i], '=');
- if (arg)
+ if (arg) {
*arg++ = '\0';
- else
+ tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
+ } else {
arg = argv[i];
+ /* If argument name is omitted, set "argN" */
+ snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
+ tp->args[i].name = kstrdup(buf, GFP_KERNEL);
+ }
- tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
if (!tp->args[i].name) {
- pr_info("Failed to allocate argument%d name '%s'.\n",
- i, argv[i]);
+ pr_info("Failed to allocate argument[%d] name.\n", i);
ret = -ENOMEM;
goto error;
}
- tmp = strchr(tp->args[i].name, ':');
- if (tmp)
- *tmp = '_'; /* convert : to _ */
+
+ if (!is_good_name(tp->args[i].name)) {
+ pr_info("Invalid argument[%d] name: %s\n",
+ i, tp->args[i].name);
+ ret = -EINVAL;
+ goto error;
+ }
if (conflict_field_name(tp->args[i].name, tp->args, i)) {
- pr_info("Argument%d name '%s' conflicts with "
+ pr_info("Argument[%d] name '%s' conflicts with "
"another field.\n", i, argv[i]);
ret = -EINVAL;
goto error;
@@ -1020,12 +1030,9 @@
/* Parse fetch argument */
ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
if (ret) {
- pr_info("Parse error at argument%d. (%d)\n", i, ret);
- kfree(tp->args[i].name);
+ pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
goto error;
}
-
- tp->nr_args++;
}
ret = register_trace_probe(tp);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 0d53c8e..7f9c3c5 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -122,7 +122,7 @@
void touch_softlockup_watchdog(void)
{
- __get_cpu_var(watchdog_touch_ts) = 0;
+ __raw_get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -142,7 +142,14 @@
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
- __get_cpu_var(watchdog_nmi_touch) = true;
+ if (watchdog_enabled) {
+ unsigned cpu;
+
+ for_each_present_cpu(cpu) {
+ if (per_cpu(watchdog_nmi_touch, cpu) != true)
+ per_cpu(watchdog_nmi_touch, cpu) = true;
+ }
+ }
touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@
wake_up_process(p);
}
+ /* if any cpu succeeds, watchdog is considered enabled for the system */
+ watchdog_enabled = 1;
+
return 0;
}
@@ -455,9 +465,6 @@
per_cpu(softlockup_watchdog, cpu) = NULL;
kthread_stop(p);
}
-
- /* if any cpu succeeds, watchdog is considered enabled for the system */
- watchdog_enabled = 1;
}
static void watchdog_enable_all_cpus(void)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 727f24e..f77afd9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
/*
- * linux/kernel/workqueue.c
+ * kernel/workqueue.c - generic async execution with shared worker pool
*
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
+ * Copyright (C) 2002 Ingo Molnar
*
- * Started by Ingo Molnar, Copyright (C) 2002
- *
- * Derived from the taskqueue/keventd code by:
- *
- * David Woodhouse <dwmw2@infradead.org>
- * Andrew Morton
- * Kai Petzke <wpp@marie.physik.tu-berlin.de>
- * Theodore Ts'o <tytso@mit.edu>
+ * Derived from the taskqueue/keventd code by:
+ * David Woodhouse <dwmw2@infradead.org>
+ * Andrew Morton
+ * Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ * Theodore Ts'o <tytso@mit.edu>
*
* Made to use alloc_percpu by Christoph Lameter.
+ *
+ * Copyright (C) 2010 SUSE Linux Products GmbH
+ * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
+ *
+ * This is the generic async execution mechanism. Work items are
+ * executed in process context. The worker pool is shared and
+ * automatically managed. There is one worker pool for each CPU and
+ * one extra for works which are better served by workers which are
+ * not bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
*/
#include <linux/module.h>
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a5ec428..4ceb05d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,8 +248,18 @@
left -= sg_size;
sg = alloc_fn(alloc_size, gfp_mask);
- if (unlikely(!sg))
- return -ENOMEM;
+ if (unlikely(!sg)) {
+ /*
+ * Adjust entry count to reflect that the last
+ * entry of the previous table won't be used for
+ * linkage. Without this, sg_kfree() may get
+ * confused.
+ */
+ if (prv)
+ table->nents = ++table->orig_nents;
+
+ return -ENOMEM;
+ }
sg_init_table(sg, alloc_size);
table->nents = table->orig_nents += sg_size;
diff --git a/mm/Kconfig b/mm/Kconfig
index f4e516e..f0fb912 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -189,7 +189,7 @@
config MIGRATION
bool "Page migration"
def_bool y
- depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
+ depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
help
Allows the migration of the physical location of pages of processes
while the virtual addresses are not changed. This is useful in
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index eaa4a5b..65d4204 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -30,6 +30,7 @@
struct backing_dev_info noop_backing_dev_info = {
.name = "noop",
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
@@ -243,6 +244,7 @@
err = bdi_init(&default_backing_dev_info);
if (!err)
bdi_register(&default_backing_dev_info, NULL, "default");
+ err = bdi_init(&noop_backing_dev_info);
return err;
}
@@ -445,8 +447,8 @@
switch (action) {
case FORK_THREAD:
__set_current_state(TASK_RUNNING);
- task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
- dev_name(bdi->dev));
+ task = kthread_create(bdi_writeback_thread, &bdi->wb,
+ "flush-%s", dev_name(bdi->dev));
if (IS_ERR(task)) {
/*
* If thread creation fails, force writeout of
@@ -457,10 +459,13 @@
/*
* The spinlock makes sure we do not lose
* wake-ups when racing with 'bdi_queue_work()'.
+ * And as soon as the bdi thread is visible, we
+ * can start it.
*/
spin_lock_bh(&bdi->wb_lock);
bdi->wb.task = task;
spin_unlock_bh(&bdi->wb_lock);
+ wake_up_process(task);
}
break;
diff --git a/mm/bounce.c b/mm/bounce.c
index 13b6dad..1481de6 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -116,8 +116,8 @@
*/
vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
- flush_dcache_page(tovec->bv_page);
bounce_copy_vec(tovec, vfrom);
+ flush_dcache_page(tovec->bv_page);
}
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 94cce51..4d709ee 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -214,15 +214,16 @@
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
-
- unsigned long inactive, isolated;
+ unsigned long active, inactive, isolated;
inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
zone_page_state(zone, NR_INACTIVE_ANON);
+ active = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_ACTIVE_ANON);
isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
zone_page_state(zone, NR_ISOLATED_ANON);
- return isolated > inactive;
+ return isolated > (inactive + active) / 2;
}
/*
diff --git a/mm/fremap.c b/mm/fremap.c
index 46f5dac..ec520c7 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -125,7 +125,6 @@
{
struct mm_struct *mm = current->mm;
struct address_space *mapping;
- unsigned long end = start + size;
struct vm_area_struct *vma;
int err = -EINVAL;
int has_write_lock = 0;
@@ -142,6 +141,10 @@
if (start + size <= start)
return err;
+ /* Does pgoff wrap? */
+ if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+ return err;
+
/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@
if (!(vma->vm_flags & VM_CAN_NONLINEAR))
goto out;
- if (end <= start || start < vma->vm_start || end > vma->vm_end)
+ if (start < vma->vm_start || start + size > vma->vm_end)
goto out;
/* Must set VM_NONLINEAR before any pages are populated. */
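The mm/fremap.c hunk above adds checks that neither the byte range (start + size) nor the page offset range (pgoff + nr_pages) wraps around. A standalone sketch of those overflow tests, with invented demo_* names:

#include <limits.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

static int demo_range_ok(unsigned long start, unsigned long size,
			 unsigned long pgoff)
{
	if (start + size <= start)                       /* byte range wraps */
		return 0;
	if (pgoff + (size >> DEMO_PAGE_SHIFT) < pgoff)   /* page offset wraps */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", demo_range_ok(0x1000, 0x2000, 10));            /* 1 */
	printf("%d\n", demo_range_ok(0x1000, ULONG_MAX, ULONG_MAX));  /* 0 */
	return 0;
}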
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc5be78..c032738 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@
* and just make the page writable */
avoidcopy = (page_mapcount(old_page) == 1);
if (avoidcopy) {
- if (!trylock_page(old_page)) {
- if (PageAnon(old_page))
- page_move_anon_rmap(old_page, vma, address);
- } else
- unlock_page(old_page);
+ if (PageAnon(old_page))
+ page_move_anon_rmap(old_page, vma, address);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
@@ -2404,7 +2401,7 @@
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
- hugepage_add_anon_rmap(new_page, vma, address);
+ hugepage_add_new_anon_rmap(new_page, vma, address);
/* Make the old page be freed below */
new_page = old_page;
mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@
vma, address);
}
- if (!pagecache_page) {
- page = pte_page(entry);
+ /*
+ * hugetlb_cow() requires page locks of pte_page(entry) and
+ * pagecache_page, so here we need to take the former one
+ * when page != pagecache_page or !pagecache_page.
+ * Note that locking order is always pagecache_page -> page,
+ * so no worry about deadlock.
+ */
+ page = pte_page(entry);
+ if (page != pagecache_page)
lock_page(page);
- }
spin_lock(&mm->page_table_lock);
/* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@
if (pagecache_page) {
unlock_page(pagecache_page);
put_page(pagecache_page);
- } else {
- unlock_page(page);
}
+ unlock_page(page);
out_mutex:
mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/ksm.c b/mm/ksm.c
index e2ae004..b1873cf 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1504,8 +1504,6 @@
{
struct page *new_page;
- unlock_page(page); /* any racers will COW it, not modify it */
-
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (new_page) {
copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@
add_page_to_unevictable_list(new_page);
}
- page_cache_release(page);
return new_page;
}
diff --git a/mm/memory.c b/mm/memory.c
index 6b2ab10..0e18b4d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2623,7 +2623,7 @@
unsigned int flags, pte_t orig_pte)
{
spinlock_t *ptl;
- struct page *page;
+ struct page *page, *swapcache = NULL;
swp_entry_t entry;
pte_t pte;
struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,25 @@
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
- page = ksm_might_need_to_copy(page, vma, address);
- if (!page) {
- ret = VM_FAULT_OOM;
- goto out;
+ /*
+ * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
+ * release the swapcache from under us. The page pin, and pte_same
+ * test below, are not enough to exclude that. Even if it is still
+ * swapcache, we need to check that the page's swap has not changed.
+ */
+ if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
+ goto out_page;
+
+ if (ksm_might_need_to_copy(page, vma, address)) {
+ swapcache = page;
+ page = ksm_does_need_to_copy(page, vma, address);
+
+ if (unlikely(!page)) {
+ ret = VM_FAULT_OOM;
+ page = swapcache;
+ swapcache = NULL;
+ goto out_page;
+ }
}
if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2750,18 @@
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
+ if (swapcache) {
+ /*
+ * Hold the lock to prevent the swap entry from being reused
+ * until we take the PT lock for the pte_same() check
+ * (to avoid false positives from pte_same). For
+ * further safety release the lock after the swap_free
+ * so that the swap count won't change under a
+ * parallel locked swapcache.
+ */
+ unlock_page(swapcache);
+ page_cache_release(swapcache);
+ }
if (flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2783,10 @@
unlock_page(page);
out_release:
page_cache_release(page);
+ if (swapcache) {
+ unlock_page(swapcache);
+ page_cache_release(swapcache);
+ }
return ret;
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a4cfcdc..dd186c1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -584,19 +584,19 @@
/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
- int pageblocks_stride;
-
/* Ensure the starting page is pageblock-aligned */
BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
- /* Move forward by at least 1 * pageblock_nr_pages */
- pageblocks_stride = 1;
-
/* If the entire pageblock is free, move to the end of free page */
- if (pageblock_free(page))
- pageblocks_stride += page_order(page) - pageblock_order;
+ if (pageblock_free(page)) {
+ int order;
+ /* be careful. we don't have locks, page_order can be changed. */
+ order = page_order(page);
+ if ((order < MAX_ORDER) && (order >= pageblock_order))
+ return page + (1 << order);
+ }
- return page + (pageblocks_stride * pageblock_nr_pages);
+ return page + pageblock_nr_pages;
}
/* Checks if this range of memory is likely to be hot-removable. */
diff --git a/mm/mlock.c b/mm/mlock.c
index cbae7c5..b70919c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,12 +135,6 @@
}
}
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
return (vma->vm_flags & VM_GROWSDOWN) &&
diff --git a/mm/mmap.c b/mm/mmap.c
index 6128dc8..00161a4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2009,6 +2009,7 @@
removed_exe_file_vma(mm);
fput(new->vm_file);
}
+ unlink_anon_vmas(new);
out_free_mpol:
mpol_put(pol);
out_free_vma:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index f5b7d17..e35bfb8 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,3 +87,24 @@
return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#ifdef CONFIG_SMP
+/* Called when a more accurate view of NR_FREE_PAGES is needed */
+unsigned long zone_nr_free_pages(struct zone *zone)
+{
+ unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+ /*
+ * While kswapd is awake, it is considered the zone is under some
+ * memory pressure. Under pressure, there is a risk that
+ * per-cpu-counter-drift will allow the min watermark to be breached
+ * potentially causing a live-lock. While kswapd is awake and
+ * free pages are low, get a better estimate for free pages
+ */
+ if (nr_free_pages < zone->percpu_drift_mark &&
+ !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+ return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+ return nr_free_pages;
+}
+#endif /* CONFIG_SMP */
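The new zone_nr_free_pages() above pays for the expensive per-CPU snapshot only when the cheap counter has dropped below the drift mark while kswapd is awake. A sketch of that decision as a pure function, with invented demo_* names:

#include <stdbool.h>
#include <stdio.h>

static long demo_free_pages(long cheap, long snapshot, long drift_mark,
			    bool kswapd_awake)
{
	if (cheap < drift_mark && kswapd_awake)
		return snapshot;     /* accurate, but walks every CPU's delta */
	return cheap;                /* fast path: a little drift is tolerated */
}

int main(void)
{
	printf("%ld\n", demo_free_pages(90, 97, 100, true));    /* 97: snapshot */
	printf("%ld\n", demo_free_pages(90, 97, 100, false));   /* 90: cheap read */
	return 0;
}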
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index fc81cb2..4029583 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -121,8 +121,8 @@
}
/* return true if the task is not adequate as candidate victim task. */
-static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
- const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p,
+ const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
if (is_global_init(p))
return true;
@@ -208,8 +208,13 @@
*/
points += p->signal->oom_score_adj;
- if (points < 0)
- return 0;
+ /*
+ * Never return 0 for an eligible task that may be killed since it's
+ * possible that no single user task uses more than 0.1% of memory and
+ * no single admin task uses more than 3.0%.
+ */
+ if (points <= 0)
+ return 1;
return (points < 1000) ? points : 1000;
}
@@ -339,26 +344,24 @@
/**
* dump_tasks - dump current memory state of all system tasks
* @mem: current's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
*
- * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * Dumps the current memory state of all eligible tasks. Tasks not in the same
+ * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
+ * are not shown.
* State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
* value, oom_score_adj value, and name.
*
- * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
- * shown.
- *
* Call with tasklist_lock read-locked.
*/
-static void dump_tasks(const struct mem_cgroup *mem)
+static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
for_each_process(p) {
- if (p->flags & PF_KTHREAD)
- continue;
- if (mem && !task_in_mem_cgroup(p, mem))
+ if (oom_unkillable_task(p, mem, nodemask))
continue;
task = find_lock_task_mm(p);
@@ -381,7 +384,7 @@
}
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
- struct mem_cgroup *mem)
+ struct mem_cgroup *mem, const nodemask_t *nodemask)
{
task_lock(current);
pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -394,7 +397,7 @@
mem_cgroup_print_oom_info(mem, p);
show_mem();
if (sysctl_oom_dump_tasks)
- dump_tasks(mem);
+ dump_tasks(mem, nodemask);
}
#define K(x) ((x) << (PAGE_SHIFT-10))
@@ -436,7 +439,7 @@
unsigned int victim_points = 0;
if (printk_ratelimit())
- dump_header(p, gfp_mask, order, mem);
+ dump_header(p, gfp_mask, order, mem, nodemask);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
@@ -482,7 +485,7 @@
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order)
+ int order, const nodemask_t *nodemask)
{
if (likely(!sysctl_panic_on_oom))
return;
@@ -496,7 +499,7 @@
return;
}
read_lock(&tasklist_lock);
- dump_header(NULL, gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL, nodemask);
read_unlock(&tasklist_lock);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
@@ -509,7 +512,7 @@
unsigned int points = 0;
struct task_struct *p;
- check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
+ check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
read_lock(&tasklist_lock);
retry:
@@ -641,6 +644,7 @@
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *nodemask)
{
+ const nodemask_t *mpol_mask;
struct task_struct *p;
unsigned long totalpages;
unsigned long freed = 0;
@@ -670,7 +674,8 @@
*/
constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
&totalpages);
- check_panic_on_oom(constraint, gfp_mask, order);
+ mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
+ check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
@@ -688,15 +693,13 @@
}
retry:
- p = select_bad_process(&points, totalpages, NULL,
- constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
- NULL);
+ p = select_bad_process(&points, totalpages, NULL, mpol_mask);
if (PTR_ERR(p) == -1UL)
goto out;
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
- dump_header(NULL, gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
read_unlock(&tasklist_lock);
panic("Out of memory and no killable processes...\n");
}
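The scoring hunk in mm/oom_kill.c above guarantees an eligible task never scores 0 (it gets at least 1) and keeps the upper bound at 1000. A sketch of just that clamping, with an invented demo_badness_clamp():

#include <stdio.h>

static unsigned int demo_badness_clamp(long points)
{
	if (points <= 0)
		return 1;                          /* still killable, lowest priority */
	return points < 1000 ? points : 1000;      /* cap at 1000 */
}

int main(void)
{
	printf("%u %u %u\n", demo_badness_clamp(-5),
	       demo_badness_clamp(42), demo_badness_clamp(5000));
	/* prints "1 42 1000" */
	return 0;
}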
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a9649f4..a8cfa9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -588,13 +588,13 @@
{
int migratetype = 0;
int batch_free = 0;
+ int to_free = count;
spin_lock(&zone->lock);
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
- while (count) {
+ while (to_free) {
struct page *page;
struct list_head *list;
@@ -619,8 +619,9 @@
/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
__free_one_page(page, zone, 0, page_private(page));
trace_mm_page_pcpu_drain(page, 0, page_private(page));
- } while (--count && --batch_free && !list_empty(list));
+ } while (--to_free && --batch_free && !list_empty(list));
}
+ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
spin_unlock(&zone->lock);
}
@@ -631,8 +632,8 @@
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
__free_one_page(page, zone, order, migratetype);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
spin_unlock(&zone->lock);
}
@@ -1461,7 +1462,7 @@
{
/* free_pages may go negative - that's OK */
long min = mark;
- long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+ long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
int o;
if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@
struct page *page = NULL;
struct reclaim_state reclaim_state;
struct task_struct *p = current;
+ bool drained = false;
cond_resched();
@@ -1864,14 +1866,25 @@
cond_resched();
- if (order != 0)
- drain_all_pages();
+ if (unlikely(!(*did_some_progress)))
+ return NULL;
- if (likely(*did_some_progress))
- page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+ page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx,
alloc_flags, preferred_zone,
migratetype);
+
+ /*
+ * If an allocation failed after direct reclaim, it could be because
+ * pages are pinned on the per-cpu lists. Drain them and try again
+ */
+ if (!page && !drained) {
+ drain_all_pages();
+ drained = true;
+ goto retry;
+ }
+
return page;
}
@@ -2423,7 +2436,7 @@
" all_unreclaimable? %s"
"\n",
zone->name,
- K(zone_page_state(zone, NR_FREE_PAGES)),
+ K(zone_nr_free_pages(zone)),
K(min_wmark_pages(zone)),
K(low_wmark_pages(zone)),
K(high_wmark_pages(zone)),
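The drain-and-retry hunk in mm/page_alloc.c above retries the freelist exactly once after draining per-CPU page lists, instead of draining unconditionally up front. A userspace sketch of that drain-then-retry shape, with invented demo_* state:

#include <stdbool.h>
#include <stdio.h>

static int demo_percpu_cached = 1;   /* stands in for pages stuck on per-CPU lists */
static int demo_freelist;            /* stands in for the zone free list */

static bool demo_alloc(void)
{
	if (demo_freelist > 0) {
		demo_freelist--;
		return true;
	}
	return false;
}

static void demo_drain(void)         /* give the cached pages back to the free list */
{
	demo_freelist += demo_percpu_cached;
	demo_percpu_cached = 0;
}

static bool demo_alloc_with_drain(void)
{
	bool drained = false;

retry:
	if (demo_alloc())
		return true;
	if (!drained) {
		demo_drain();
		drained = true;
		goto retry;
	}
	return false;
}

int main(void)
{
	printf("%s\n", demo_alloc_with_drain() ? "allocated" : "failed");
	/* prints "allocated": the retry after draining succeeds */
	return 0;
}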
diff --git a/mm/percpu.c b/mm/percpu.c
index 58c572b..c76ef38 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1401,9 +1401,9 @@
if (pcpu_first_unit_cpu == NR_CPUS)
pcpu_first_unit_cpu = cpu;
+ pcpu_last_unit_cpu = cpu;
}
}
- pcpu_last_unit_cpu = cpu;
pcpu_nr_units = unit;
for_each_possible_cpu(cpu)
diff --git a/mm/rmap.c b/mm/rmap.c
index f6f0d2d..9d2ba01 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1564,13 +1564,14 @@
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
struct anon_vma *anon_vma = vma->anon_vma;
+
BUG_ON(!anon_vma);
- if (!exclusive) {
- struct anon_vma_chain *avc;
- avc = list_entry(vma->anon_vma_chain.prev,
- struct anon_vma_chain, same_vma);
- anon_vma = avc->anon_vma;
- }
+
+ if (PageAnon(page))
+ return;
+ if (!exclusive)
+ anon_vma = anon_vma->root;
+
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
page->index = linear_page_index(vma, address);
@@ -1581,6 +1582,8 @@
{
struct anon_vma *anon_vma = vma->anon_vma;
int first;
+
+ BUG_ON(!PageLocked(page));
BUG_ON(!anon_vma);
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
first = atomic_inc_and_test(&page->_mapcount);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1f3f9c5..7c703ff 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@
long total_swap_pages;
static int least_priority;
-static bool swap_for_hibernation;
-
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@
nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
if (nr_blocks) {
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL,
- BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+ nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
if (err)
return err;
cond_resched();
@@ -153,8 +150,7 @@
nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL,
- BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+ nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
if (err)
break;
@@ -193,8 +189,7 @@
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
- BLKDEV_IFL_BARRIER))
+ nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
break;
}
@@ -320,10 +315,8 @@
if (offset > si->highest_bit)
scan_base = offset = si->lowest_bit;
- /* reuse swap entry of cache-only swap if not hibernation. */
- if (vm_swap_full()
- && usage == SWAP_HAS_CACHE
- && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ /* reuse swap entry of cache-only swap if not busy. */
+ if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&swap_lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@
spin_lock(&swap_lock);
if (nr_swap_pages <= 0)
goto noswap;
- if (swap_for_hibernation)
- goto noswap;
nr_swap_pages--;
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@
return (swp_entry_t) {0};
}
+/* The only caller of this function is now the suspend routine */

+swp_entry_t get_swap_page_of_type(int type)
+{
+ struct swap_info_struct *si;
+ pgoff_t offset;
+
+ spin_lock(&swap_lock);
+ si = swap_info[type];
+ if (si && (si->flags & SWP_WRITEOK)) {
+ nr_swap_pages--;
+ /* This is called to allocate a swap entry, not a swap cache page */
+ offset = scan_swap_map(si, 1);
+ if (offset) {
+ spin_unlock(&swap_lock);
+ return swp_entry(type, offset);
+ }
+ nr_swap_pages++;
+ }
+ spin_unlock(&swap_lock);
+ return (swp_entry_t) {0};
+}
+
static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
struct swap_info_struct *p;
@@ -670,6 +683,24 @@
if (page_swapcount(page))
return 0;
+ /*
+ * Once hibernation has begun to create its image of memory,
+ * there's a danger that one of the calls to try_to_free_swap()
+ * - most probably a call from __try_to_reclaim_swap() while
+ * hibernation is allocating its own swap pages for the image,
+ * but conceivably even a call from memory reclaim - will free
+ * the swap from a page which has already been recorded in the
+ * image as a clean swapcache page, and then reuse its swap for
+ * another page of the image. On waking from hibernation, the
+ * original page might be freed under memory pressure, then
+ * later read back in from swap, now with the wrong data.
+ *
+ * Hibernation clears bits from gfp_allowed_mask to prevent
+ * memory reclaim from writing to disk, so check that here.
+ */
+ if (!(gfp_allowed_mask & __GFP_IO))
+ return 0;
+
delete_from_swap_cache(page);
SetPageDirty(page);
return 1;
@@ -746,74 +777,6 @@
#endif
#ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
- int i;
-
- spin_lock(&swap_lock);
-
- printk(KERN_INFO "PM: Freeze Swap\n");
- swap_for_hibernation = true;
- for (i = 0; i < MAX_SWAPFILES; i++)
- hibernation_offset[i] = 1;
- spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
- spin_lock(&swap_lock);
- if (swap_for_hibernation) {
- printk(KERN_INFO "PM: Thaw Swap\n");
- swap_for_hibernation = false;
- }
- spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
- pgoff_t off;
- swp_entry_t val = {0};
- struct swap_info_struct *si;
-
- spin_lock(&swap_lock);
-
- si = swap_info[type];
- if (!si || !(si->flags & SWP_WRITEOK))
- goto done;
-
- for (off = hibernation_offset[type]; off < si->max; ++off) {
- if (!si->swap_map[off])
- break;
- }
- if (off < si->max) {
- val = swp_entry(type, off);
- hibernation_offset[type] = off + 1;
- }
-done:
- spin_unlock(&swap_lock);
- return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
- /* Nothing to do */
-}
-
/*
* Find the swap type that corresponds to given device (if any).
*
@@ -2084,7 +2047,7 @@
p->flags |= SWP_SOLIDSTATE;
p->cluster_next = 1 + (random32() % p->highest_bit);
}
- if (discard_swap(p) == 0)
+ if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
p->flags |= SWP_DISCARDABLE;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c391c32..c5dfabf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1804,12 +1804,11 @@
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*/
-static bool shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
- bool all_unreclaimable = true;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1827,8 +1826,38 @@
}
shrink_zone(priority, zone, sc);
- all_unreclaimable = false;
}
+}
+
+static bool zone_reclaimable(struct zone *zone)
+{
+ return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
+/*
+ * While hibernation is in progress, kswapd is frozen and cannot mark
+ * zones all_unreclaimable, so OOM during hibernation would go undetected.
+ * Check for unreclaimable zones in direct reclaim as well as in kswapd.
+ */
+static bool all_unreclaimable(struct zonelist *zonelist,
+ struct scan_control *sc)
+{
+ struct zoneref *z;
+ struct zone *zone;
+ bool all_unreclaimable = true;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
+ if (!populated_zone(zone))
+ continue;
+ if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ continue;
+ if (zone_reclaimable(zone)) {
+ all_unreclaimable = false;
+ break;
+ }
+ }
+
return all_unreclaimable;
}
@@ -1852,7 +1881,6 @@
struct scan_control *sc)
{
int priority;
- bool all_unreclaimable;
unsigned long total_scanned = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct zoneref *z;
@@ -1869,7 +1897,7 @@
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
- all_unreclaimable = shrink_zones(priority, zonelist, sc);
+ shrink_zones(priority, zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
@@ -1931,7 +1959,7 @@
return sc->nr_reclaimed;
/* top priority shrink_zones still had more to do? don't OOM, then */
- if (scanning_global_lru(sc) && !all_unreclaimable)
+ if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
return 1;
return 0;
@@ -2197,8 +2225,7 @@
total_scanned += sc.nr_scanned;
if (zone->all_unreclaimable)
continue;
- if (nr_slab == 0 &&
- zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+ if (nr_slab == 0 && !zone_reclaimable(zone))
zone->all_unreclaimable = 1;
/*
* If we've done a decent amount of scanning and
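
The vmscan.c change above replaces the boolean returned by shrink_zones() with an explicit all_unreclaimable() pass over the zonelist, built on the zone_reclaimable() ratio test. A minimal sketch of that heuristic follows; the fake_zone struct is a hypothetical stand-in, not the real struct zone layout.

#include <stdbool.h>
#include <stdio.h>

struct fake_zone {
	unsigned long pages_scanned;      /* pages looked at since the last success */
	unsigned long reclaimable_pages;  /* pages currently sitting on the LRU lists */
};

static bool zone_reclaimable(const struct fake_zone *z)
{
	/* Give up on a zone only after scanning it six times over. */
	return z->pages_scanned < z->reclaimable_pages * 6;
}

static bool all_unreclaimable(const struct fake_zone *zones, int nr)
{
	for (int i = 0; i < nr; i++)
		if (zone_reclaimable(&zones[i]))
			return false;
	return true;
}

int main(void)
{
	struct fake_zone zones[] = {
		{ .pages_scanned = 7000, .reclaimable_pages = 1000 },  /* exhausted   */
		{ .pages_scanned =  500, .reclaimable_pages = 1000 },  /* still worth scanning */
	};

	printf("all unreclaimable: %s\n",
	       all_unreclaimable(zones, 2) ? "yes (OOM)" : "no (keep trying)");
	return 0;
}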
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f389168..355a9e6 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -138,11 +138,24 @@
int threshold;
for_each_populated_zone(zone) {
+ unsigned long max_drift, tolerate_drift;
+
threshold = calculate_threshold(zone);
for_each_online_cpu(cpu)
per_cpu_ptr(zone->pageset, cpu)->stat_threshold
= threshold;
+
+ /*
+ * Only set percpu_drift_mark if there is a danger that
+ * NR_FREE_PAGES makes the low watermark look satisfied when in
+ * fact an allocation could breach the min watermark
+ */
+ tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
+ max_drift = num_online_cpus() * threshold;
+ if (max_drift > tolerate_drift)
+ zone->percpu_drift_mark = high_wmark_pages(zone) +
+ max_drift;
}
}
@@ -813,7 +826,7 @@
"\n scanned %lu"
"\n spanned %lu"
"\n present %lu",
- zone_page_state(zone, NR_FREE_PAGES),
+ zone_nr_free_pages(zone),
min_wmark_pages(zone),
low_wmark_pages(zone),
high_wmark_pages(zone),
@@ -998,6 +1011,7 @@
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ refresh_zone_stat_thresholds();
start_cpu_timer(cpu);
node_set_state(cpu_to_node(cpu), N_CPU);
break;
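
The vmstat.c hunk above only arms percpu_drift_mark when the worst-case per-CPU counter drift (num_online_cpus() * threshold) could hide a breach of the min watermark behind a seemingly healthy low watermark. A small standalone sketch of that calculation, with made-up watermark and CPU numbers:

#include <stdio.h>

int main(void)
{
	unsigned long min_wmark = 1000, low_wmark = 1250, high_wmark = 1500;
	unsigned long threshold = 32;          /* per-CPU stat_threshold */
	unsigned long online_cpus = 16;
	unsigned long percpu_drift_mark = 0;   /* 0: drift can safely be ignored */

	unsigned long tolerate_drift = low_wmark - min_wmark;
	unsigned long max_drift = online_cpus * threshold;

	/* Only arm the mark when worst-case drift exceeds the watermark gap. */
	if (max_drift > tolerate_drift)
		percpu_drift_mark = high_wmark + max_drift;

	printf("max_drift=%lu tolerate=%lu drift_mark=%lu\n",
	       max_drift, tolerate_drift, percpu_drift_mark);
	return 0;
}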
diff --git a/net/9p/client.c b/net/9p/client.c
index dc6f2f2..9eb7250 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -331,8 +331,10 @@
}
}
- if (c->tagpool)
+ if (c->tagpool) {
+ p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */
p9_idpool_destroy(c->tagpool);
+ }
/* free requests associated with tags */
for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
@@ -944,6 +946,7 @@
int16_t nwqids, count;
err = 0;
+ wqids = NULL;
clnt = oldfid->clnt;
if (clone) {
fid = p9_fid_create(clnt);
@@ -994,9 +997,11 @@
else
fid->qid = oldfid->qid;
+ kfree(wqids);
return fid;
clunk_fid:
+ kfree(wqids);
p9_client_clunk(fid);
fid = NULL;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 0ea20c3..17c5ba7 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -426,8 +426,10 @@
/* Allocate an fcall for the reply */
rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
- if (!rpl_context)
+ if (!rpl_context) {
+ err = -ENOMEM;
goto err_close;
+ }
/*
* If the request has a buffer, steal it, otherwise
@@ -445,8 +447,8 @@
}
rpl_context->rc = req->rc;
if (!rpl_context->rc) {
- kfree(rpl_context);
- goto err_close;
+ err = -ENOMEM;
+ goto err_free2;
}
/*
@@ -458,11 +460,8 @@
*/
if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
err = post_recv(client, rpl_context);
- if (err) {
- kfree(rpl_context->rc);
- kfree(rpl_context);
- goto err_close;
- }
+ if (err)
+ goto err_free1;
} else
atomic_dec(&rdma->rq_count);
@@ -471,8 +470,10 @@
/* Post the request */
c = kmalloc(sizeof *c, GFP_KERNEL);
- if (!c)
- goto err_close;
+ if (!c) {
+ err = -ENOMEM;
+ goto err_free1;
+ }
c->req = req;
c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -499,9 +500,15 @@
return ib_post_send(rdma->qp, &wr, &bad_wr);
error:
+ kfree(c);
+ kfree(rpl_context->rc);
+ kfree(rpl_context);
P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
return -EIO;
-
+ err_free1:
+ kfree(rpl_context->rc);
+ err_free2:
+ kfree(rpl_context);
err_close:
spin_lock_irqsave(&rdma->req_lock, flags);
if (rdma->state < P9_RDMA_CLOSING) {
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index dcfbe99..b885159 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -329,7 +329,8 @@
mutex_lock(&virtio_9p_lock);
list_for_each_entry(chan, &virtio_chan_list, chan_list) {
- if (!strncmp(devname, chan->tag, chan->tag_len)) {
+ if (!strncmp(devname, chan->tag, chan->tag_len) &&
+ strlen(devname) == chan->tag_len) {
if (!chan->inuse) {
chan->inuse = true;
found = 1;
diff --git a/net/Kconfig b/net/Kconfig
index e330594..e926884 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -217,7 +217,7 @@
config RPS
boolean
- depends on SMP && SYSFS
+ depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
menu "Network testing"
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 651babd..ad2b232 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -399,12 +399,6 @@
unregister_netdev(net_dev);
free_netdev(net_dev);
}
- read_lock_irq(&devs_lock);
- if (list_empty(&br2684_devs)) {
- /* last br2684 device */
- unregister_atmdevice_notifier(&atm_dev_notifier);
- }
- read_unlock_irq(&devs_lock);
return;
}
@@ -675,7 +669,6 @@
if (list_empty(&br2684_devs)) {
/* 1st br2684 device */
- register_atmdevice_notifier(&atm_dev_notifier);
brdev->number = 1;
} else
brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
@@ -815,6 +808,7 @@
return -ENOMEM;
#endif
register_atm_ioctl(&br2684_ioctl_ops);
+ register_atmdevice_notifier(&atm_dev_notifier);
return 0;
}
@@ -830,9 +824,7 @@
#endif
- /* if not already empty */
- if (!list_empty(&br2684_devs))
- unregister_atmdevice_notifier(&atm_dev_notifier);
+ unregister_atmdevice_notifier(&atm_dev_notifier);
while (!list_empty(&br2684_devs)) {
net_dev = list_entry_brdev(br2684_devs.next);
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721fbb..660dd41 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2058,16 +2058,16 @@
struct sk_buff *skb)
{
int queue_index;
- struct sock *sk = skb->sk;
+ const struct net_device_ops *ops = dev->netdev_ops;
- queue_index = sk_tx_queue_get(sk);
- if (queue_index < 0) {
- const struct net_device_ops *ops = dev->netdev_ops;
+ if (ops->ndo_select_queue) {
+ queue_index = ops->ndo_select_queue(dev, skb);
+ queue_index = dev_cap_txqueue(dev, queue_index);
+ } else {
+ struct sock *sk = skb->sk;
+ queue_index = sk_tx_queue_get(sk);
+ if (queue_index < 0) {
- if (ops->ndo_select_queue) {
- queue_index = ops->ndo_select_queue(dev, skb);
- queue_index = dev_cap_txqueue(dev, queue_index);
- } else {
queue_index = 0;
if (dev->real_num_tx_queues > 1)
queue_index = skb_tx_hash(dev, skb);
@@ -4845,7 +4845,7 @@
dev = list_first_entry(head, struct net_device, unreg_list);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
- synchronize_net();
+ rcu_barrier();
list_for_each_entry(dev, head, unreg_list)
dev_put(dev);
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 1cd98df..e6b133b 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,9 +35,10 @@
* in any case.
*/
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
- int size, err, ct;
+ int size, ct;
+ long err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 26396ff..c83b421 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2706,7 +2706,7 @@
} else if (skb_gro_len(p) != pinfo->gso_size)
return -E2BIG;
- headroom = NET_SKB_PAD + NET_IP_ALIGN;
+ headroom = skb_headroom(p);
nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
if (unlikely(!nskb))
return -ENOMEM;
diff --git a/net/core/sock.c b/net/core/sock.c
index b05b9b6..ef30e9d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1351,9 +1351,9 @@
{
int uid;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
return uid;
}
EXPORT_SYMBOL(sock_i_uid);
@@ -1362,9 +1362,9 @@
{
unsigned long ino;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
return ino;
}
EXPORT_SYMBOL(sock_i_ino);
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index f055094..721a8a3 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -62,8 +62,11 @@
}
if (!inet->inet_saddr)
inet->inet_saddr = rt->rt_src; /* Update source address */
- if (!inet->inet_rcv_saddr)
+ if (!inet->inet_rcv_saddr) {
inet->inet_rcv_saddr = rt->rt_src;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ }
inet->inet_daddr = rt->rt_dst;
inet->inet_dport = usin->sin_port;
sk->sk_state = TCP_ESTABLISHED;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a439689..7d02a9f 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -246,6 +246,7 @@
struct fib_result res;
int no_addr, rpf, accept_local;
+ bool dev_match;
int ret;
struct net *net;
@@ -273,12 +274,22 @@
}
*spec_dst = FIB_RES_PREFSRC(res);
fib_combine_itag(itag, &res);
+ dev_match = false;
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1)
+ for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+ struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+ if (nh->nh_dev == dev) {
+ dev_match = true;
+ break;
+ }
+ }
#else
if (FIB_RES_DEV(res) == dev)
+ dev_match = true;
#endif
- {
+ if (dev_match) {
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
fib_res_put(&res);
return ret;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 79d057a..4a8e370 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -186,7 +186,9 @@
{
struct tnode *ret = node_parent(node);
- return rcu_dereference(ret);
+ return rcu_dereference_check(ret,
+ rcu_read_lock_held() ||
+ lockdep_rtnl_is_held());
}
/* Same as rcu_assign_pointer
@@ -1753,7 +1755,9 @@
static struct leaf *trie_firstleaf(struct trie *t)
{
- struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
+ struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie,
+ rcu_read_lock_held() ||
+ lockdep_rtnl_is_held());
if (!n)
return NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a1ad0e7..1fdcacd 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -834,7 +834,7 @@
int mark = 0;
- if (len == 8) {
+ if (len == 8 || IGMP_V2_SEEN(in_dev)) {
if (ih->code == 0) {
/* Alas, old v1 router presents here. */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 945b20a..35c93e8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -45,7 +45,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
@@ -699,7 +699,7 @@
if ((dst = rt->rt_gateway) == 0)
goto tx_error_icmp;
}
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct in6_addr *addr6;
int addr_type;
@@ -774,7 +774,7 @@
goto tx_error;
}
}
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
@@ -850,7 +850,7 @@
if ((iph->ttl = tiph->ttl) == 0) {
if (skb->protocol == htons(ETH_P_IP))
iph->ttl = old_iph->ttl;
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04b6989..7649d77 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -488,9 +488,8 @@
* we can switch to copy when see the first bad fragment.
*/
if (skb_has_frags(skb)) {
- struct sk_buff *frag;
+ struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
- int truesizes = 0;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
@@ -503,18 +502,18 @@
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
- goto slow_path;
+ goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
- goto slow_path;
+ goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
- truesizes += frag->truesize;
+ skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
@@ -524,7 +523,6 @@
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
skb->data_len = first_len - skb_headlen(skb);
- skb->truesize -= truesizes;
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
@@ -576,6 +574,15 @@
}
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
+
+slow_path_clean:
+ skb_walk_frags(skb, frag2) {
+ if (frag2 == frag)
+ break;
+ frag2->sk = NULL;
+ frag2->destructor = NULL;
+ skb->truesize += frag2->truesize;
+ }
}
slow_path:
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 6c40a8c..64b70ad 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1129,6 +1129,9 @@
case IP_HDRINCL:
val = inet->hdrincl;
break;
+ case IP_NODEFRAG:
+ val = inet->nodefrag;
+ break;
case IP_MTU_DISCOVER:
val = inet->pmtudisc;
break;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index b254daf..43eec80 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -112,6 +112,7 @@
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set_noref(nskb, skb_dst(oldskb));
+ nskb->protocol = htons(ETH_P_IP);
if (ip_route_me_harder(nskb, addr_type))
goto free_nskb;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index eab8de3..f3a9b42 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,9 +66,11 @@
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+ struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(skb->sk);
- if (inet && inet->nodefrag)
+ if (sk && (sk->sk_family == PF_INET) &&
+ inet->nodefrag)
return NF_ACCEPT;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 1679e2c..ee5f419 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -893,13 +893,15 @@
unsigned char s[4];
if (offset & 1) {
- s[0] = s[2] = 0;
+ s[0] = ~0;
s[1] = ~*optr;
+ s[2] = 0;
s[3] = *nptr;
} else {
- s[1] = s[3] = 0;
s[0] = ~*optr;
+ s[1] = ~0;
s[2] = *nptr;
+ s[3] = 0;
}
*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3f56b6e..ac6559c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1231,7 +1231,7 @@
}
if (net_ratelimit())
- printk(KERN_WARNING "Neighbour table overflow.\n");
+ printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
rt_drop(rt);
return -ENOBUFS;
}
@@ -2738,6 +2738,11 @@
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
+static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+{
+ return NULL;
+}
+
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@@ -2746,7 +2751,7 @@
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
- .check = ipv4_dst_check,
+ .check = ipv4_blackhole_dst_check,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
.entries = ATOMIC_INIT(0),
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3fb1428..f115ea6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,8 +386,6 @@
*/
mask = 0;
- if (sk->sk_err)
- mask = POLLERR;
/*
* POLLHUP is certainly not done right. But poll() doesn't
@@ -457,6 +455,11 @@
if (tp->urg_data & TCP_URG_VALID)
mask |= POLLPRI;
}
+ /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ smp_rmb();
+ if (sk->sk_err)
+ mask |= POLLERR;
+
return mask;
}
EXPORT_SYMBOL(tcp_poll);
@@ -940,7 +943,7 @@
sg = sk->sk_route_caps & NETIF_F_SG;
while (--iovlen >= 0) {
- int seglen = iov->iov_len;
+ size_t seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
iov++;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e663b78..b55f60f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2545,7 +2545,8 @@
cnt += tcp_skb_pcount(skb);
if (cnt > packets) {
- if (tcp_is_sack(tp) || (oldcnt >= packets))
+ if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+ (oldcnt >= packets))
break;
mss = skb_shinfo(skb)->gso_size;
@@ -4048,6 +4049,8 @@
default:
sk->sk_err = ECONNRESET;
}
+ /* This barrier is coupled with smp_rmb() in tcp_poll() */
+ smp_wmb();
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
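
The tcp.c/tcp_input.c pair above moves the sk_err check in tcp_poll() behind an smp_rmb() that pairs with the new smp_wmb() in tcp_reset(), so a poller that observes the reset also observes the error code. A rough userspace analogue of that pairing using C11 fences; the field names are illustrative only, not the socket layout.

#include <stdatomic.h>
#include <stdio.h>

static int sk_err;                 /* plain data, ordered by the fences below */
static atomic_int sk_done;         /* stands in for the state poll() checks */

static void reset_side(void)       /* tcp_reset() analogue */
{
	sk_err = 104;                                    /* ECONNRESET */
	atomic_thread_fence(memory_order_release);       /* smp_wmb() */
	atomic_store_explicit(&sk_done, 1, memory_order_relaxed);
}

static int poll_side(void)         /* tcp_poll() analogue */
{
	if (!atomic_load_explicit(&sk_done, memory_order_relaxed))
		return 0;                                /* nothing to report yet */
	atomic_thread_fence(memory_order_acquire);       /* smp_rmb() */
	return sk_err;                                   /* now guaranteed visible */
}

int main(void)
{
	reset_side();
	printf("poll sees error %d\n", poll_side());
	return 0;
}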
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 32e0bef..fb23c2e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1260,6 +1260,49 @@
}
EXPORT_SYMBOL(udp_lib_unhash);
+/*
+ * inet_rcv_saddr was changed, we must rehash secondary hash
+ */
+void udp_lib_rehash(struct sock *sk, u16 newhash)
+{
+ if (sk_hashed(sk)) {
+ struct udp_table *udptable = sk->sk_prot->h.udp_table;
+ struct udp_hslot *hslot, *hslot2, *nhslot2;
+
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ nhslot2 = udp_hashslot2(udptable, newhash);
+ udp_sk(sk)->udp_portaddr_hash = newhash;
+ if (hslot2 != nhslot2) {
+ hslot = udp_hashslot(udptable, sock_net(sk),
+ udp_sk(sk)->udp_port_hash);
+ /* we must lock primary chain too */
+ spin_lock_bh(&hslot->lock);
+
+ spin_lock(&hslot2->lock);
+ hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+ hslot2->count--;
+ spin_unlock(&hslot2->lock);
+
+ spin_lock(&nhslot2->lock);
+ hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+ &nhslot2->head);
+ nhslot2->count++;
+ spin_unlock(&nhslot2->lock);
+
+ spin_unlock_bh(&hslot->lock);
+ }
+ }
+}
+EXPORT_SYMBOL(udp_lib_rehash);
+
+static void udp_v4_rehash(struct sock *sk)
+{
+ u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+ inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_num);
+ udp_lib_rehash(sk, new_hash);
+}
+
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
@@ -1843,6 +1886,7 @@
.backlog_rcv = __udp_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 869078d..a580349 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -61,7 +61,7 @@
static int xfrm4_get_tos(struct flowi *fl)
{
- return fl->fl4_tos;
+ return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
}
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1ef1366..4794762 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -21,21 +21,25 @@
}
static void
-__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
{
- x->sel.daddr.a4 = fl->fl4_dst;
- x->sel.saddr.a4 = fl->fl4_src;
- x->sel.dport = xfrm_flowi_dport(fl);
- x->sel.dport_mask = htons(0xffff);
- x->sel.sport = xfrm_flowi_sport(fl);
- x->sel.sport_mask = htons(0xffff);
- x->sel.family = AF_INET;
- x->sel.prefixlen_d = 32;
- x->sel.prefixlen_s = 32;
- x->sel.proto = fl->proto;
- x->sel.ifindex = fl->oif;
+ sel->daddr.a4 = fl->fl4_dst;
+ sel->saddr.a4 = fl->fl4_src;
+ sel->dport = xfrm_flowi_dport(fl);
+ sel->dport_mask = htons(0xffff);
+ sel->sport = xfrm_flowi_sport(fl);
+ sel->sport_mask = htons(0xffff);
+ sel->family = AF_INET;
+ sel->prefixlen_d = 32;
+ sel->prefixlen_s = 32;
+ sel->proto = fl->proto;
+ sel->ifindex = fl->oif;
+}
+
+static void
+xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
x->id = tmpl->id;
if (x->id.daddr.a4 == 0)
x->id.daddr.a4 = daddr->a4;
@@ -70,6 +74,7 @@
.owner = THIS_MODULE,
.init_flags = xfrm4_init_flags,
.init_tempsel = __xfrm4_init_tempsel,
+ .init_temprop = xfrm4_init_temprop,
.output = xfrm4_output,
.extract_input = xfrm4_extract_input,
.extract_output = xfrm4_extract_output,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab70a3f..324fac3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4637,10 +4637,12 @@
if (err < 0) {
printk(KERN_CRIT "IPv6 Addrconf:"
" cannot initialize default policy table: %d.\n", err);
- return err;
+ goto out;
}
- register_pernet_subsys(&addrconf_ops);
+ err = register_pernet_subsys(&addrconf_ops);
+ if (err < 0)
+ goto out_addrlabel;
/* The addrconf netdev notifier requires that loopback_dev
* has it's ipv6 private information allocated and setup
@@ -4692,7 +4694,9 @@
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
unregister_pernet_subsys(&addrconf_ops);
-
+out_addrlabel:
+ ipv6_addr_label_cleanup();
+out:
return err;
}
@@ -4703,6 +4707,7 @@
unregister_netdevice_notifier(&ipv6_dev_notf);
unregister_pernet_subsys(&addrconf_ops);
+ ipv6_addr_label_cleanup();
rtnl_lock();
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f0e774c..8175f80 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -393,6 +393,11 @@
return register_pernet_subsys(&ipv6_addr_label_ops);
}
+void ipv6_addr_label_cleanup(void)
+{
+ unregister_pernet_subsys(&ipv6_addr_label_ops);
+}
+
static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
[IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), },
[IFAL_LABEL] = { .len = sizeof(u32), },
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7d929a2..ef371aa 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -105,9 +105,12 @@
if (ipv6_addr_any(&np->saddr))
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- if (ipv6_addr_any(&np->rcv_saddr))
+ if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
&np->rcv_saddr);
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ }
goto out;
}
@@ -181,6 +184,8 @@
if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
}
ip6_dst_store(sk, dst,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d40b330..980912e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -639,7 +639,7 @@
if (skb_has_frags(skb)) {
int first_len = skb_pagelen(skb);
- int truesizes = 0;
+ struct sk_buff *frag2;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
@@ -651,18 +651,18 @@
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
- goto slow_path;
+ goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
- goto slow_path;
+ goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
- truesizes += frag->truesize;
}
+ skb->truesize -= frag->truesize;
}
err = 0;
@@ -693,7 +693,6 @@
first_len = skb_pagelen(skb);
skb->data_len = first_len - skb_headlen(skb);
- skb->truesize -= truesizes;
skb->len = first_len;
ipv6_hdr(skb)->payload_len = htons(first_len -
sizeof(struct ipv6hdr));
@@ -756,6 +755,15 @@
IPSTATS_MIB_FRAGFAILS);
dst_release(&rt->dst);
return err;
+
+slow_path_clean:
+ skb_walk_frags(skb, frag2) {
+ if (frag2 == frag)
+ break;
+ frag2->sk = NULL;
+ frag2->destructor = NULL;
+ skb->truesize += frag2->truesize;
+ }
}
slow_path:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 13ef5bc..578f3c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -113,14 +113,6 @@
kfree_skb(NFCT_FRAG6_CB(skb)->orig);
}
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct sk_buff *skb)
-{
- atomic_sub(skb->truesize, &nf_init_frags.mem);
- nf_skb_free(skb);
- kfree_skb(skb);
-}
-
/* Destruction primitives. */
static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
@@ -282,66 +274,22 @@
}
found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
+ /* RFC5722, Section 4:
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments, including those not yet received) MUST be silently
+ * discarded.
*/
- if (prev) {
- int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;
- if (i > 0) {
- offset += i;
- if (end <= offset) {
- pr_debug("overlap\n");
- goto err;
- }
- if (!pskb_pull(skb, i)) {
- pr_debug("Can't pull\n");
- goto err;
- }
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+ /* Check for overlap with preceding fragment. */
+ if (prev &&
+ (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+ goto discard_fq;
- /* Look for overlap with succeeding segments.
- * If we can merge fragments, do it.
- */
- while (next && NFCT_FRAG6_CB(next)->offset < end) {
- /* overlap is 'i' bytes */
- int i = end - NFCT_FRAG6_CB(next)->offset;
-
- if (i < next->len) {
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- pr_debug("Eat head of the overlapped parts.: %d", i);
- if (!pskb_pull(next, i))
- goto err;
-
- /* next fragment */
- NFCT_FRAG6_CB(next)->offset += i;
- fq->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragmnet is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
- else
- fq->q.fragments = next;
-
- fq->q.meat -= free_it->len;
- frag_kfree_skb(free_it);
- }
- }
+ /* Look for overlap with succeeding segment. */
+ if (next && NFCT_FRAG6_CB(next)->offset < end)
+ goto discard_fq;
NFCT_FRAG6_CB(skb)->offset = offset;
@@ -371,6 +319,8 @@
write_unlock(&nf_frags.lock);
return 0;
+discard_fq:
+ fq_kill(fq);
err:
return -1;
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 545c414..64cfef1 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -149,13 +149,6 @@
}
EXPORT_SYMBOL(ip6_frag_match);
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
-{
- atomic_sub(skb->truesize, &nf->mem);
- kfree_skb(skb);
-}
-
void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
struct frag_queue *fq = container_of(q, struct frag_queue, q);
@@ -346,58 +339,22 @@
}
found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
+ /* RFC5722, Section 4:
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments, including those not yet received) MUST be silently
+ * discarded.
*/
- if (prev) {
- int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
- if (i > 0) {
- offset += i;
- if (end <= offset)
- goto err;
- if (!pskb_pull(skb, i))
- goto err;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+ /* Check for overlap with preceding fragment. */
+ if (prev &&
+ (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+ goto discard_fq;
- /* Look for overlap with succeeding segments.
- * If we can merge fragments, do it.
- */
- while (next && FRAG6_CB(next)->offset < end) {
- int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
-
- if (i < next->len) {
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- if (!pskb_pull(next, i))
- goto err;
- FRAG6_CB(next)->offset += i; /* next fragment */
- fq->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragment is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
- else
- fq->q.fragments = next;
-
- fq->q.meat -= free_it->len;
- frag_kfree_skb(fq->q.net, free_it);
- }
- }
+ /* Look for overlap with succeeding segment. */
+ if (next && FRAG6_CB(next)->offset < end)
+ goto discard_fq;
FRAG6_CB(skb)->offset = offset;
@@ -436,6 +393,8 @@
write_unlock(&ip6_frags.lock);
return -1;
+discard_fq:
+ fq_kill(fq);
err:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
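
Both reassembly hunks above drop the old trim-and-merge handling in favour of the RFC 5722 rule: any overlap between a new fragment and its neighbours discards the whole datagram. A small sketch of that overlap test, with arbitrary fragment offsets and lengths chosen for illustration:

#include <stdbool.h>
#include <stdio.h>

struct frag {
	unsigned int offset;   /* first byte this fragment covers */
	unsigned int len;      /* number of bytes it carries */
};

/* true => drop the whole reassembly queue, per RFC 5722, Section 4 */
static bool overlaps_neighbours(const struct frag *prev, const struct frag *next,
				unsigned int offset, unsigned int end)
{
	if (prev && prev->offset + prev->len > offset)   /* tail of prev overlaps */
		return true;
	if (next && next->offset < end)                  /* head of next overlaps */
		return true;
	return false;
}

int main(void)
{
	struct frag prev = { .offset = 0,    .len = 1280 };
	struct frag next = { .offset = 2560, .len = 1280 };

	/* New fragment covering 1276..2555 overlaps the tail of prev by 4 bytes. */
	bool drop = overlaps_neighbours(&prev, &next, 1276, 1276 + 1280);
	printf("%s\n", drop ? "overlap: discard whole datagram" : "insert fragment");
	return 0;
}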
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d126365..8323136 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -670,7 +670,7 @@
if (net_ratelimit())
printk(KERN_WARNING
- "Neighbour table overflow.\n");
+ "ipv6: Neighbour table overflow.\n");
dst_free(&rt->dst);
return NULL;
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1dd1aff..5acb356 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -111,6 +111,15 @@
return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
+static void udp_v6_rehash(struct sock *sk)
+{
+ u16 new_hash = udp6_portaddr_hash(sock_net(sk),
+ &inet6_sk(sk)->rcv_saddr,
+ inet_sk(sk)->inet_num);
+
+ udp_lib_rehash(sk, new_hash);
+}
+
static inline int compute_score(struct sock *sk, struct net *net,
unsigned short hnum,
struct in6_addr *saddr, __be16 sport,
@@ -1447,6 +1456,7 @@
.backlog_rcv = udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index f417b77..a67575d 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -20,23 +20,27 @@
#include <net/addrconf.h>
static void
-__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
{
/* Initialize temporary selector matching only
* to current session. */
- ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst);
- ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src);
- x->sel.dport = xfrm_flowi_dport(fl);
- x->sel.dport_mask = htons(0xffff);
- x->sel.sport = xfrm_flowi_sport(fl);
- x->sel.sport_mask = htons(0xffff);
- x->sel.family = AF_INET6;
- x->sel.prefixlen_d = 128;
- x->sel.prefixlen_s = 128;
- x->sel.proto = fl->proto;
- x->sel.ifindex = fl->oif;
+ ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
+ ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
+ sel->dport = xfrm_flowi_dport(fl);
+ sel->dport_mask = htons(0xffff);
+ sel->sport = xfrm_flowi_sport(fl);
+ sel->sport_mask = htons(0xffff);
+ sel->family = AF_INET6;
+ sel->prefixlen_d = 128;
+ sel->prefixlen_s = 128;
+ sel->proto = fl->proto;
+ sel->ifindex = fl->oif;
+}
+
+static void
+xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
x->id = tmpl->id;
if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
@@ -168,6 +172,7 @@
.eth_proto = htons(ETH_P_IPV6),
.owner = THIS_MODULE,
.init_tempsel = __xfrm6_init_tempsel,
+ .init_temprop = xfrm6_init_temprop,
.tmpl_sort = __xfrm6_tmpl_sort,
.state_sort = __xfrm6_state_sort,
.output = xfrm6_output,
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index a788f9e..6130f9d 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1102,7 +1102,7 @@
memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
le16_to_cpus(&val_len); n+=2;
- if (val_len > 1016) {
+ if (val_len >= 1016) {
IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
return -RSP_INVALID_COMMAND_FORMAT;
}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 023ba82..5826129 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1024,7 +1024,8 @@
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- int rc = -EINVAL, opt;
+ unsigned int opt;
+ int rc = -EINVAL;
lock_sock(sk);
if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index e4dae02..cf4aea3 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -689,7 +689,7 @@
int __init llc_station_init(void)
{
- u16 rc = -ENOBUFS;
+ int rc = -ENOBUFS;
struct sk_buff *skb;
struct llc_station_state_ev *ev;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f8ddba4..4c2f89d 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -924,6 +924,7 @@
ip_vs_out_stats(cp, skb);
ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+ ip_vs_update_conntrack(skb, cp, 0);
ip_vs_conn_put(cp);
skb->ipvs_property = 1;
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 33b329b..7e9af5b 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -410,7 +410,6 @@
union nf_inet_addr to;
__be16 port;
struct ip_vs_conn *n_cp;
- struct nf_conn *ct;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -497,11 +496,6 @@
ip_vs_control_add(n_cp, cp);
}
- ct = (struct nf_conn *)skb->nfct;
- if (ct && ct != &nf_conntrack_untracked)
- ip_vs_expect_related(skb, ct, n_cp,
- IPPROTO_TCP, &n_cp->dport, 1);
-
/*
* Move tunnel to listen state
*/
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 21e1a5e..49df6be 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -349,8 +349,8 @@
}
#endif
-static void
-ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+void
+ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
{
struct nf_conn *ct = (struct nf_conn *)skb->nfct;
struct nf_conntrack_tuple new_tuple;
@@ -365,11 +365,17 @@
* real-server we will see RIP->DIP.
*/
new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- new_tuple.src.u3 = cp->daddr;
+ if (outin)
+ new_tuple.src.u3 = cp->daddr;
+ else
+ new_tuple.dst.u3 = cp->vaddr;
/*
* This will also take care of UDP and other protocols.
*/
- new_tuple.src.u.tcp.port = cp->dport;
+ if (outin)
+ new_tuple.src.u.tcp.port = cp->dport;
+ else
+ new_tuple.dst.u.tcp.port = cp->vport;
nf_conntrack_alter_reply(ct, &new_tuple);
}
@@ -428,7 +434,7 @@
IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
- ip_vs_update_conntrack(skb, cp);
+ ip_vs_update_conntrack(skb, cp, 1);
/* FIXME: when application helper enlarges the packet and the length
is larger than the MTU of outgoing device, there will be still
@@ -506,7 +512,7 @@
IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
- ip_vs_update_conntrack(skb, cp);
+ ip_vs_update_conntrack(skb, cp, 1);
/* FIXME: when application helper enlarges the packet and the length
is larger than the MTU of outgoing device, there will be still
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 7dcf7a4..8d9e4c94 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -48,15 +48,17 @@
{
unsigned int off, len;
struct nf_ct_ext_type *t;
+ size_t alloc_size;
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
BUG_ON(t == NULL);
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
len = off + t->len;
+ alloc_size = t->alloc_size;
rcu_read_unlock();
- *ext = kzalloc(t->alloc_size, gfp);
+ *ext = kzalloc(alloc_size, gfp);
if (!*ext)
return NULL;
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 53d8922..f64de954 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1376,7 +1376,7 @@
unsigned int msglen, origlen;
const char *dptr, *end;
s16 diff, tdiff = 0;
- int ret;
+ int ret = NF_ACCEPT;
typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
if (ctinfo != IP_CT_ESTABLISHED &&
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 5490fc3..daab8c4 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -70,7 +70,11 @@
int
nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
- if (inet_sk(sk)->transparent) {
+ bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
+ inet_twsk(sk)->tw_transparent :
+ inet_sk(sk)->transparent;
+
+ if (transparent) {
skb_orphan(skb);
skb->sk = sk;
skb->destructor = nf_tproxy_destructor;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c397524..c519939 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@
struct rds_connection *conn;
struct rds_tcp_connection *tc;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (conn == NULL) {
state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@
break;
}
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
state_change(sk);
}
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 975183f..27844f2 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,7 +114,7 @@
rdsdebug("listen data ready sk %p\n", sk);
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
ready = sk->sk_user_data;
if (ready == NULL) { /* check for teardown race */
ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@
queue_work(rds_wq, &rds_tcp_listen_work);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
ready(sk, bytes);
}
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 1aba687..e437974 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -324,7 +324,7 @@
rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (conn == NULL) { /* check for teardown race */
ready = sk->sk_data_ready;
@@ -338,7 +338,7 @@
if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
ready(sk, bytes);
}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index a28b895..2f012a0 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -224,7 +224,7 @@
struct rds_connection *conn;
struct rds_tcp_connection *tc;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (conn == NULL) {
write_space = sk->sk_write_space;
@@ -244,7 +244,7 @@
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
/*
* write_space is only called when data leaves tcp's send queue if
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8e45e76..d952e7e 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -679,7 +679,7 @@
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -739,7 +739,7 @@
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 34066278..6318e11 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -255,10 +255,6 @@
error = -EINVAL;
goto err_out;
}
- if (!list_empty(&flow->list)) {
- error = -EEXIST;
- goto err_out;
- }
} else {
int i;
unsigned long cl;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a646681..bcc4590 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -92,7 +92,6 @@
SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
packet, vtag);
- sctp_packet_reset(packet);
packet->vtag = vtag;
if (ecn_capable && sctp_packet_empty(packet)) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 24b2cd5..d344dc4 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1232,6 +1232,18 @@
return 0;
}
+static bool list_has_sctp_addr(const struct list_head *list,
+ union sctp_addr *ipaddr)
+{
+ struct sctp_transport *addr;
+
+ list_for_each_entry(addr, list, transports) {
+ if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
+ return true;
+ }
+
+ return false;
+}
/* A restart is occurring, check to make sure no new addresses
* are being added as we may be under a takeover attack.
*/
@@ -1240,10 +1252,10 @@
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
- struct sctp_transport *new_addr, *addr;
- int found;
+ struct sctp_transport *new_addr;
+ int ret = 1;
- /* Implementor's Guide - Sectin 5.2.2
+ /* Implementor's Guide - Section 5.2.2
* ...
* Before responding the endpoint MUST check to see if the
* unexpected INIT adds new addresses to the association. If new
@@ -1254,31 +1266,19 @@
/* Search through all current addresses and make sure
* we aren't adding any new ones.
*/
- new_addr = NULL;
- found = 0;
-
list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
- transports) {
- found = 0;
- list_for_each_entry(addr, &asoc->peer.transport_addr_list,
- transports) {
- if (sctp_cmp_addr_exact(&new_addr->ipaddr,
- &addr->ipaddr)) {
- found = 1;
- break;
- }
- }
- if (!found)
+ transports) {
+ if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
+ &new_addr->ipaddr)) {
+ sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+ commands);
+ ret = 0;
break;
- }
-
- /* If a new address was added, ABORT the sender. */
- if (!found && new_addr) {
- sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands);
+ }
}
/* Return success if all addresses were found. */
- return found;
+ return ret;
}
/* Populate the verification/tie tags based on overlapping INIT
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 36cb660..e9eaaf7 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -38,7 +38,7 @@
static LIST_HEAD(cred_unused);
static unsigned long number_cred_unused;
-#define MAX_HASHTABLE_BITS (10)
+#define MAX_HASHTABLE_BITS (14)
static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
{
unsigned long num;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dcfc66b..12c4859 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -745,17 +745,18 @@
struct rpc_inode *rpci = RPC_I(inode);
struct gss_upcall_msg *gss_msg;
+restart:
spin_lock(&inode->i_lock);
- while (!list_empty(&rpci->in_downcall)) {
+ list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
- gss_msg = list_entry(rpci->in_downcall.next,
- struct gss_upcall_msg, list);
+ if (!list_empty(&gss_msg->msg.list))
+ continue;
gss_msg->msg.errno = -EPIPE;
atomic_inc(&gss_msg->count);
__gss_unhash_msg(gss_msg);
spin_unlock(&inode->i_lock);
gss_release_msg(gss_msg);
- spin_lock(&inode->i_lock);
+ goto restart;
}
spin_unlock(&inode->i_lock);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 0326446..778e5df 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -237,6 +237,7 @@
if (!supported_gss_krb5_enctype(alg)) {
printk(KERN_WARNING "gss_kerberos_mech: unsupported "
"encryption key algorithm %d\n", alg);
+ p = ERR_PTR(-EINVAL);
goto out_err;
}
p = simple_get_netobj(p, end, &key);
@@ -282,15 +283,19 @@
ctx->enctype = ENCTYPE_DES_CBC_RAW;
ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
- if (ctx->gk5e == NULL)
+ if (ctx->gk5e == NULL) {
+ p = ERR_PTR(-EINVAL);
goto out_err;
+ }
/* The downcall format was designed before we completely understood
* the uses of the context fields; so it includes some stuff we
* just give some minimal sanity-checking, and some we ignore
* completely (like the next twenty bytes): */
- if (unlikely(p + 20 > end || p + 20 < p))
+ if (unlikely(p + 20 > end || p + 20 < p)) {
+ p = ERR_PTR(-EFAULT);
goto out_err;
+ }
p += 20;
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
if (IS_ERR(p))
@@ -619,6 +624,7 @@
if (ctx->seq_send64 != ctx->seq_send) {
dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
(long unsigned)ctx->seq_send64, ctx->seq_send);
+ p = ERR_PTR(-EINVAL);
goto out_err;
}
p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index dc3f1f5..adade3d 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -100,6 +100,7 @@
if (version != 1) {
dprintk("RPC: unknown spkm3 token format: "
"obsolete nfs-utils?\n");
+ p = ERR_PTR(-EINVAL);
goto out_err_free_ctx;
}
@@ -135,8 +136,10 @@
if (IS_ERR(p))
goto out_err_free_intg_alg;
- if (p != end)
+ if (p != end) {
+ p = ERR_PTR(-EFAULT);
goto out_err_free_intg_key;
+ }
ctx_id->internal_ctx_id = ctx;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 2388d83..fa55490 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -226,7 +226,7 @@
goto out_no_principal;
}
- kref_init(&clnt->cl_kref);
+ atomic_set(&clnt->cl_count, 1);
err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
if (err < 0)
@@ -390,14 +390,14 @@
if (new->cl_principal == NULL)
goto out_no_principal;
}
- kref_init(&new->cl_kref);
+ atomic_set(&new->cl_count, 1);
err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
if (err != 0)
goto out_no_path;
if (new->cl_auth)
atomic_inc(&new->cl_auth->au_count);
xprt_get(clnt->cl_xprt);
- kref_get(&clnt->cl_kref);
+ atomic_inc(&clnt->cl_count);
rpc_register_client(new);
rpciod_up();
return new;
@@ -465,10 +465,8 @@
* Free an RPC client
*/
static void
-rpc_free_client(struct kref *kref)
+rpc_free_client(struct rpc_clnt *clnt)
{
- struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
dprintk("RPC: destroying %s client for %s\n",
clnt->cl_protname, clnt->cl_server);
if (!IS_ERR(clnt->cl_path.dentry)) {
@@ -495,12 +493,10 @@
* Free an RPC client
*/
static void
-rpc_free_auth(struct kref *kref)
+rpc_free_auth(struct rpc_clnt *clnt)
{
- struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
if (clnt->cl_auth == NULL) {
- rpc_free_client(kref);
+ rpc_free_client(clnt);
return;
}
@@ -509,10 +505,11 @@
* release remaining GSS contexts. This mechanism ensures
* that it can do so safely.
*/
- kref_init(kref);
+ atomic_inc(&clnt->cl_count);
rpcauth_release(clnt->cl_auth);
clnt->cl_auth = NULL;
- kref_put(kref, rpc_free_client);
+ if (atomic_dec_and_test(&clnt->cl_count))
+ rpc_free_client(clnt);
}
/*
@@ -525,7 +522,8 @@
if (list_empty(&clnt->cl_tasks))
wake_up(&destroy_wait);
- kref_put(&clnt->cl_kref, rpc_free_auth);
+ if (atomic_dec_and_test(&clnt->cl_count))
+ rpc_free_auth(clnt);
}
/**
@@ -588,7 +586,7 @@
if (clnt != NULL) {
rpc_task_release_client(task);
task->tk_client = clnt;
- kref_get(&clnt->cl_kref);
+ atomic_inc(&clnt->cl_count);
if (clnt->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
/* Add to the client's list of all tasks */
@@ -931,7 +929,7 @@
task->tk_status = 0;
if (status >= 0) {
if (task->tk_rqstp) {
- task->tk_action = call_allocate;
+ task->tk_action = call_refresh;
return;
}
@@ -966,13 +964,54 @@
}
/*
- * 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
+ * 2. Bind and/or refresh the credentials
+ */
+static void
+call_refresh(struct rpc_task *task)
+{
+ dprint_status(task);
+
+ task->tk_action = call_refreshresult;
+ task->tk_status = 0;
+ task->tk_client->cl_stats->rpcauthrefresh++;
+ rpcauth_refreshcred(task);
+}
+
+/*
+ * 2a. Process the results of a credential refresh
+ */
+static void
+call_refreshresult(struct rpc_task *task)
+{
+ int status = task->tk_status;
+
+ dprint_status(task);
+
+ task->tk_status = 0;
+ task->tk_action = call_allocate;
+ if (status >= 0 && rpcauth_uptodatecred(task))
+ return;
+ switch (status) {
+ case -EACCES:
+ rpc_exit(task, -EACCES);
+ return;
+ case -ENOMEM:
+ rpc_exit(task, -ENOMEM);
+ return;
+ case -ETIMEDOUT:
+ rpc_delay(task, 3*HZ);
+ }
+ task->tk_action = call_refresh;
+}
+
+/*
+ * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
* (Note: buffer memory is freed in xprt_release).
*/
static void
call_allocate(struct rpc_task *task)
{
- unsigned int slack = task->tk_client->cl_auth->au_cslack;
+ unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -980,7 +1019,7 @@
dprint_status(task);
task->tk_status = 0;
- task->tk_action = call_refresh;
+ task->tk_action = call_bind;
if (req->rq_buffer)
return;
@@ -1017,47 +1056,6 @@
rpc_exit(task, -ERESTARTSYS);
}
-/*
- * 2a. Bind and/or refresh the credentials
- */
-static void
-call_refresh(struct rpc_task *task)
-{
- dprint_status(task);
-
- task->tk_action = call_refreshresult;
- task->tk_status = 0;
- task->tk_client->cl_stats->rpcauthrefresh++;
- rpcauth_refreshcred(task);
-}
-
-/*
- * 2b. Process the results of a credential refresh
- */
-static void
-call_refreshresult(struct rpc_task *task)
-{
- int status = task->tk_status;
-
- dprint_status(task);
-
- task->tk_status = 0;
- task->tk_action = call_bind;
- if (status >= 0 && rpcauth_uptodatecred(task))
- return;
- switch (status) {
- case -EACCES:
- rpc_exit(task, -EACCES);
- return;
- case -ENOMEM:
- rpc_exit(task, -ENOMEM);
- return;
- case -ETIMEDOUT:
- rpc_delay(task, 3*HZ);
- }
- task->tk_action = call_refresh;
-}
-
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
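
Note: the clnt.c hunks above replace the struct kref in rpc_clnt with a bare atomic
reference count (cl_count), taken with atomic_inc()/atomic_inc_not_zero() and dropped
with atomic_dec_and_test(). The short userspace sketch below is not part of the patch;
struct obj and obj_get/obj_tryget/obj_put are made-up names, and C11 <stdatomic.h>
stands in for the kernel's atomic_t, but the get/put pattern is the same.

/*
 * Illustrative userspace sketch (not part of the patch) of the refcount
 * pattern: a plain atomic counter with get/put and an "increment only if
 * still live" lookup, mirroring atomic_inc_not_zero()/atomic_dec_and_test().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int count;
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add_explicit(&o->count, 1, memory_order_relaxed);
}

/* Take a reference only if the count has not already dropped to zero. */
static bool obj_tryget(struct obj *o)
{
	int c = atomic_load_explicit(&o->count, memory_order_relaxed);

	while (c != 0) {
		if (atomic_compare_exchange_weak_explicit(&o->count, &c, c + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	return false;
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the previous value; 1 means this was the last ref */
	if (atomic_fetch_sub_explicit(&o->count, 1, memory_order_acq_rel) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->count, 1);	/* like atomic_set(&new->cl_count, 1) */
	if (obj_tryget(o))		/* like atomic_inc_not_zero() in a lookup */
		obj_put(o);
	obj_put(o);			/* last put frees the object */
	printf("done\n");
	return 0;
}
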
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 95ccbcf..8c8eef2 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -48,7 +48,7 @@
return;
do {
msg = list_entry(head->next, struct rpc_pipe_msg, list);
- list_del(&msg->list);
+ list_del_init(&msg->list);
msg->errno = err;
destroy_msg(msg);
} while (!list_empty(head));
@@ -208,7 +208,7 @@
if (msg != NULL) {
spin_lock(&inode->i_lock);
msg->errno = -EAGAIN;
- list_del(&msg->list);
+ list_del_init(&msg->list);
spin_unlock(&inode->i_lock);
rpci->ops->destroy_msg(msg);
}
@@ -268,7 +268,7 @@
if (res < 0 || msg->len == msg->copied) {
filp->private_data = NULL;
spin_lock(&inode->i_lock);
- list_del(&msg->list);
+ list_del_init(&msg->list);
spin_unlock(&inode->i_lock);
rpci->ops->destroy_msg(msg);
}
@@ -371,21 +371,23 @@
static int
rpc_info_open(struct inode *inode, struct file *file)
{
- struct rpc_clnt *clnt;
+ struct rpc_clnt *clnt = NULL;
int ret = single_open(file, rpc_show_info, NULL);
if (!ret) {
struct seq_file *m = file->private_data;
- mutex_lock(&inode->i_mutex);
- clnt = RPC_I(inode)->private;
- if (clnt) {
- kref_get(&clnt->cl_kref);
+
+ spin_lock(&file->f_path.dentry->d_lock);
+ if (!d_unhashed(file->f_path.dentry))
+ clnt = RPC_I(inode)->private;
+ if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
+ spin_unlock(&file->f_path.dentry->d_lock);
m->private = clnt;
} else {
+ spin_unlock(&file->f_path.dentry->d_lock);
single_release(inode, file);
ret = -EINVAL;
}
- mutex_unlock(&inode->i_mutex);
}
return ret;
}
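
Note: the rpc_pipe.c hunks switch list_del() to list_del_init() so that a message
unlinked from the pipe queue is left as a valid, empty list rather than with stale
pointers, which lets later code test or unlink the same entry again safely. The
following userspace sketch is not part of the patch and uses a hypothetical mini
list, not the kernel's <linux/list.h>, to show the idea.

/* Minimal doubly linked list illustrating list_del_init() semantics. */
#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);	/* entry remains a valid empty list */
}

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	struct list_head queue, msg;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&msg, &queue);

	list_del_init(&msg);	/* like the flush/destroy paths above */
	/* A later "is it still queued?" check is now safe: */
	printf("still queued: %s\n", list_empty(&msg) ? "no" : "yes");
	return 0;
}
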
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b6309db..fe9306b 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -800,7 +800,7 @@
u32 _xid;
__be32 *xp;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
dprintk("RPC: xs_udp_data_ready...\n");
if (!(xprt = xprt_from_sock(sk)))
goto out;
@@ -852,7 +852,7 @@
dropit:
skb_free_datagram(sk, skb);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@
dprintk("RPC: xs_tcp_data_ready...\n");
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
if (xprt->shutdown)
@@ -1248,7 +1248,7 @@
read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
} while (read > 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/*
@@ -1301,7 +1301,7 @@
{
struct rpc_xprt *xprt;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
@@ -1313,7 +1313,7 @@
switch (sk->sk_state) {
case TCP_ESTABLISHED:
- spin_lock_bh(&xprt->transport_lock);
+ spin_lock(&xprt->transport_lock);
if (!xprt_test_and_set_connected(xprt)) {
struct sock_xprt *transport = container_of(xprt,
struct sock_xprt, xprt);
@@ -1327,7 +1327,7 @@
xprt_wake_pending_tasks(xprt, -EAGAIN);
}
- spin_unlock_bh(&xprt->transport_lock);
+ spin_unlock(&xprt->transport_lock);
break;
case TCP_FIN_WAIT1:
/* The client initiated a shutdown of the socket */
@@ -1365,7 +1365,7 @@
xs_sock_mark_closed(xprt);
}
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/**
@@ -1376,7 +1376,7 @@
{
struct rpc_xprt *xprt;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: %s client %p...\n"
@@ -1384,7 +1384,7 @@
__func__, xprt, sk->sk_err);
xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_write_space(struct sock *sk)
@@ -1416,13 +1416,13 @@
*/
static void xs_udp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
/* from net/core/sock.c:sock_def_write_space */
if (sock_writeable(sk))
xs_write_space(sk);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/**
@@ -1437,13 +1437,13 @@
*/
static void xs_tcp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
/* from net/core/stream.c:sk_stream_write_space */
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
xs_write_space(sk);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 4414a18..0b39b24 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -692,6 +692,7 @@
static u32 ordernum = 1;
struct unix_address *addr;
int err;
+ unsigned int retries = 0;
mutex_lock(&u->readlock);
@@ -717,9 +718,17 @@
if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
addr->hash)) {
spin_unlock(&unix_table_lock);
- /* Sanity yield. It is unusual case, but yet... */
- if (!(ordernum&0xFF))
- yield();
+ /*
+ * __unix_find_socket_byname() may take a long time if many names
+ * are already in use.
+ */
+ cond_resched();
+ /* Give up if all names seem to be in use. */
+ if (retries++ == 0xFFFFF) {
+ err = -ENOSPC;
+ kfree(addr);
+ goto out;
+ }
goto retry;
}
addr->hash ^= sk->sk_type;
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c
index 3feb28e..674d426 100644
--- a/net/wireless/wext-priv.c
+++ b/net/wireless/wext-priv.c
@@ -152,7 +152,7 @@
} else if (!iwp->pointer)
return -EFAULT;
- extra = kmalloc(extra_size, GFP_KERNEL);
+ extra = kzalloc(extra_size, GFP_KERNEL);
if (!extra)
return -ENOMEM;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index a3cca0a..64f2ae1 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -101,7 +101,7 @@
err = -EHOSTUNREACH;
goto error_nolock;
}
- skb_dst_set_noref(skb, dst);
+ skb_dst_set(skb, dst_clone(dst));
x = dst->xfrm;
} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2b3ed7a..cbab6e1 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1175,9 +1175,8 @@
tmpl->mode == XFRM_MODE_BEET) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
- family = tmpl->encap_family;
- if (xfrm_addr_any(local, family)) {
- error = xfrm_get_saddr(net, &tmp, remote, family);
+ if (xfrm_addr_any(local, tmpl->encap_family)) {
+ error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
if (error)
goto fail;
local = &tmp;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5208b12f..eb96ce5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -656,15 +656,23 @@
EXPORT_SYMBOL(xfrm_sad_getinfo);
static int
-xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
- unsigned short family)
+xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
+ struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr,
+ unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
return -1;
- afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
+ afinfo->init_tempsel(&x->sel, fl);
+
+ if (family != tmpl->encap_family) {
+ xfrm_state_put_afinfo(afinfo);
+ afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
+ if (!afinfo)
+ return -1;
+ }
+ afinfo->init_temprop(x, tmpl, daddr, saddr);
xfrm_state_put_afinfo(afinfo);
return 0;
}
@@ -790,37 +798,38 @@
int error = 0;
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
+ unsigned short encap_family = tmpl->encap_family;
to_put = NULL;
spin_lock_bh(&xfrm_state_lock);
- h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
+ h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
- if (x->props.family == family &&
+ if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
- xfrm_state_addr_check(x, daddr, saddr, family) &&
+ xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+ xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
&best, &acquire_in_progress, &error);
}
if (best)
goto found;
- h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+ h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
- if (x->props.family == family &&
+ if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
- xfrm_state_addr_check(x, daddr, saddr, family) &&
+ xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+ xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
&best, &acquire_in_progress, &error);
}
@@ -829,7 +838,7 @@
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
(x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
- tmpl->id.proto, family)) != NULL) {
+ tmpl->id.proto, encap_family)) != NULL) {
to_put = x0;
error = -EEXIST;
goto out;
@@ -839,9 +848,9 @@
error = -ENOMEM;
goto out;
}
- /* Initialize temporary selector matching only
+ /* Initialize temporary state matching only
* to current session. */
- xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
+ xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
memcpy(&x->mark, &pol->mark, sizeof(x->mark));
error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
@@ -856,10 +865,10 @@
x->km.state = XFRM_STATE_ACQ;
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
- h = xfrm_src_hash(net, daddr, saddr, family);
+ h = xfrm_src_hash(net, daddr, saddr, encap_family);
hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
if (x->id.spi) {
- h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
+ h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a1a5cf9..108eeb9 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -212,7 +212,8 @@
cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
"$(if $(CONFIG_64BIT),64,32)" \
- "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
+ "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \
+ "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
"$(if $(part-of-module),1,0)" "$(@)";
endif
diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c
index 79ab973..fc3b18d 100644
--- a/scripts/basic/docproc.c
+++ b/scripts/basic/docproc.c
@@ -34,12 +34,14 @@
*
*/
+#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <limits.h>
+#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -54,6 +56,7 @@
FILEONLY *internalfunctions;
FILEONLY *externalfunctions;
FILEONLY *symbolsonly;
+FILEONLY *findall;
typedef void FILELINE(char * file, char * line);
FILELINE * singlefunctions;
@@ -65,12 +68,30 @@
#define KERNELDOCPATH "scripts/"
#define KERNELDOC "kernel-doc"
#define DOCBOOK "-docbook"
+#define LIST "-list"
#define FUNCTION "-function"
#define NOFUNCTION "-nofunction"
#define NODOCSECTIONS "-no-doc-sections"
static char *srctree, *kernsrctree;
+static char **all_list = NULL;
+static int all_list_len = 0;
+
+static void consume_symbol(const char *sym)
+{
+ int i;
+
+ for (i = 0; i < all_list_len; i++) {
+ if (!all_list[i])
+ continue;
+ if (strcmp(sym, all_list[i]))
+ continue;
+ all_list[i] = NULL;
+ break;
+ }
+}
+
static void usage (void)
{
fprintf(stderr, "Usage: docproc {doc|depend} file\n");
@@ -248,6 +269,7 @@
struct symfile * sym = &symfilelist[i];
for (j=0; j < sym->symbolcnt; j++) {
vec[idx++] = type;
+ consume_symbol(sym->symbollist[j].name);
vec[idx++] = sym->symbollist[j].name;
}
}
@@ -287,6 +309,11 @@
vec[idx++] = &line[i];
}
}
+ for (i = 0; i < idx; i++) {
+ if (strcmp(vec[i], FUNCTION))
+ continue;
+ consume_symbol(vec[i + 1]);
+ }
vec[idx++] = filename;
vec[idx] = NULL;
exec_kernel_doc(vec);
@@ -306,6 +333,10 @@
if (*s == '\n')
*s = '\0';
+ asprintf(&s, "DOC: %s", line);
+ consume_symbol(s);
+ free(s);
+
vec[0] = KERNELDOC;
vec[1] = DOCBOOK;
vec[2] = FUNCTION;
@@ -315,6 +346,84 @@
exec_kernel_doc(vec);
}
+static void find_all_symbols(char *filename)
+{
+ char *vec[4]; /* kerneldoc -list file NULL */
+ pid_t pid;
+ int ret, i, count, start;
+ char real_filename[PATH_MAX + 1];
+ int pipefd[2];
+ char *data, *str;
+ size_t data_len = 0;
+
+ vec[0] = KERNELDOC;
+ vec[1] = LIST;
+ vec[2] = filename;
+ vec[3] = NULL;
+
+ if (pipe(pipefd)) {
+ perror("pipe");
+ exit(1);
+ }
+
+ switch (pid=fork()) {
+ case -1:
+ perror("fork");
+ exit(1);
+ case 0:
+ close(pipefd[0]);
+ dup2(pipefd[1], 1);
+ memset(real_filename, 0, sizeof(real_filename));
+ strncat(real_filename, kernsrctree, PATH_MAX);
+ strncat(real_filename, "/" KERNELDOCPATH KERNELDOC,
+ PATH_MAX - strlen(real_filename));
+ execvp(real_filename, vec);
+ fprintf(stderr, "exec ");
+ perror(real_filename);
+ exit(1);
+ default:
+ close(pipefd[1]);
+ data = malloc(4096);
+ do {
+ while ((ret = read(pipefd[0],
+ data + data_len,
+ 4096)) > 0) {
+ data_len += ret;
+ data = realloc(data, data_len + 4096);
+ }
+ } while (ret == -EAGAIN);
+ if (ret != 0) {
+ perror("read");
+ exit(1);
+ }
+ waitpid(pid, &ret ,0);
+ }
+ if (WIFEXITED(ret))
+ exitstatus |= WEXITSTATUS(ret);
+ else
+ exitstatus = 0xff;
+
+ count = 0;
+ /* poor man's strtok, but with counting */
+ for (i = 0; i < data_len; i++) {
+ if (data[i] == '\n') {
+ count++;
+ data[i] = '\0';
+ }
+ }
+ start = all_list_len;
+ all_list_len += count;
+ all_list = realloc(all_list, sizeof(char *) * all_list_len);
+ str = data;
+ for (i = 0; i < data_len && start != all_list_len; i++) {
+ if (data[i] == '\0') {
+ all_list[start] = str;
+ str = data + i + 1;
+ start++;
+ }
+ }
+}
+
/*
* Parse file, calling action specific functions for:
* 1) Lines containing !E
@@ -322,7 +431,8 @@
* 3) Lines containing !D
* 4) Lines containing !F
* 5) Lines containing !P
- * 6) Default lines - lines not matching the above
+ * 6) Lines containing !C
+ * 7) Default lines - lines not matching the above
*/
static void parse_file(FILE *infile)
{
@@ -365,6 +475,12 @@
s++;
docsection(line + 2, s);
break;
+ case 'C':
+ while (*s && !isspace(*s)) s++;
+ *s = '\0';
+ if (findall)
+ findall(line+2);
+ break;
default:
defaultline(line);
}
@@ -380,6 +496,7 @@
int main(int argc, char *argv[])
{
FILE * infile;
+ int i;
srctree = getenv("SRCTREE");
if (!srctree)
@@ -415,6 +532,7 @@
symbolsonly = find_export_symbols;
singlefunctions = noaction2;
docsection = noaction2;
+ findall = find_all_symbols;
parse_file(infile);
/* Rewind to start from beginning of file again */
@@ -425,8 +543,16 @@
symbolsonly = printline;
singlefunctions = singfunc;
docsection = docsect;
+ findall = NULL;
parse_file(infile);
+
+ for (i = 0; i < all_list_len; i++) {
+ if (!all_list[i])
+ continue;
+ fprintf(stderr, "Warning: didn't use docs for %s\n",
+ all_list[i]);
+ }
}
else if (strcmp("depend", argv[1]) == 0)
{
@@ -439,6 +565,7 @@
symbolsonly = adddep;
singlefunctions = adddep2;
docsection = adddep2;
+ findall = adddep;
parse_file(infile);
printf("\n");
}
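
Note: find_all_symbols(), added to docproc.c above, runs scripts/kernel-doc with the
new -list option and captures its stdout through a pipe. The sketch below is not part
of the patch; it execs plain echo instead of kernel-doc, but shows the bare
pipe/fork/dup2/exec/read pattern the function is built on.

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int pipefd[2];
	pid_t pid;
	char buf[256];
	ssize_t n;
	int status;

	if (pipe(pipefd)) {
		perror("pipe");
		return 1;
	}
	pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		/* child: route stdout into the pipe, then exec the helper */
		close(pipefd[0]);
		dup2(pipefd[1], STDOUT_FILENO);
		execlp("echo", "echo", "symbol_a", "symbol_b", (char *)NULL);
		perror("execlp");	/* only reached if exec fails */
		_exit(1);
	}
	/* parent: read everything the child printed */
	close(pipefd[1]);
	while ((n = read(pipefd[0], buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	waitpid(pid, &status, 0);
	return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}
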
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 102e123..cdb6dc1 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -44,12 +44,13 @@
# Note: This only supports 'c'.
# usage:
-# kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ]
+# kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ]
# [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile
# or
# [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile
#
# Set output format using one of -docbook -html -text or -man. Default is man.
+# The -list format is for internal use by docproc.
#
# -no-doc-sections
# Do not output DOC: sections
@@ -210,9 +211,16 @@
$type_param, "\$1" );
my $blankline_text = "";
+# list mode
+my %highlights_list = ( $type_constant, "\$1",
+ $type_func, "\$1",
+ $type_struct, "\$1",
+ $type_param, "\$1" );
+my $blankline_list = "";
sub usage {
- print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n";
+ print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n";
+ print " [ -no-doc-sections ]\n";
print " [ -function funcname [ -function funcname ...] ]\n";
print " [ -nofunction funcname [ -nofunction funcname ...] ]\n";
print " c source file(s) > outputfile\n";
@@ -318,6 +326,10 @@
$output_mode = "xml";
%highlights = %highlights_xml;
$blankline = $blankline_xml;
+ } elsif ($cmd eq "-list") {
+ $output_mode = "list";
+ %highlights = %highlights_list;
+ $blankline = $blankline_list;
} elsif ($cmd eq "-gnome") {
$output_mode = "gnome";
%highlights = %highlights_gnome;
@@ -1361,6 +1373,42 @@
}
}
+## list mode output functions
+
+sub output_function_list(%) {
+ my %args = %{$_[0]};
+
+ print $args{'function'} . "\n";
+}
+
+# output enum in list
+sub output_enum_list(%) {
+ my %args = %{$_[0]};
+ print $args{'enum'} . "\n";
+}
+
+# output typedef in list
+sub output_typedef_list(%) {
+ my %args = %{$_[0]};
+ print $args{'typedef'} . "\n";
+}
+
+# output struct as list
+sub output_struct_list(%) {
+ my %args = %{$_[0]};
+
+ print $args{'struct'} . "\n";
+}
+
+sub output_blockhead_list(%) {
+ my %args = %{$_[0]};
+ my ($parameter, $section);
+
+ foreach $section (@{$args{'sectionlist'}}) {
+ print "DOC: $section\n";
+ }
+}
+
##
# generic output function for all types (function, struct/union, typedef, enum);
# calls the generated, variable output_ function name based on
@@ -1679,7 +1727,7 @@
foreach $px (0 .. $#prms) {
$prm_clean = $prms[$px];
$prm_clean =~ s/\[.*\]//;
- $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//;
+ $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
# ignore array size in a parameter string;
# however, the original param string may contain
# spaces, e.g.: addr[6 + 2]
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index e67f054..1d7963f 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -270,6 +270,8 @@
} elsif ($arch eq "arm") {
$alignment = 2;
$section_type = '%progbits';
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_ARM_(CALL|PC24|THM_CALL)" .
+ "\\s+(__gnu_mcount_nc|mcount)\$";
} elsif ($arch eq "ia64") {
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
index 3c88be9..02baec7 100644
--- a/security/apparmor/include/resource.h
+++ b/security/apparmor/include/resource.h
@@ -33,8 +33,8 @@
};
int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
- struct rlimit *new_rlim);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+ unsigned int resource, struct rlimit *new_rlim);
void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 6e85cdb..506d2ba 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -40,6 +40,7 @@
*ns_name = NULL;
if (name[0] == ':') {
char *split = strchr(&name[1], ':');
+ *ns_name = skip_spaces(&name[1]);
if (split) {
/* overwrite ':' with \0 */
*split = 0;
@@ -47,7 +48,6 @@
} else
/* a ns name without a following profile is allowed */
name = NULL;
- *ns_name = &name[1];
}
if (name && *name == 0)
name = NULL;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index f73e2c2..cf1de44 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -614,7 +614,7 @@
int error = 0;
if (!unconfined(profile))
- error = aa_task_setrlimit(profile, resource, new_rlim);
+ error = aa_task_setrlimit(profile, task, resource, new_rlim);
return error;
}
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 19358dc..8239605 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -59,8 +59,7 @@
{
struct path root, tmp;
char *res;
- int deleted, connected;
- int error = 0;
+ int connected, error = 0;
/* Get the root we want to resolve too, released below */
if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@
}
spin_lock(&dcache_lock);
- /* There is a race window between path lookup here and the
- * need to strip the " (deleted) string that __d_path applies
- * Detect the race and relookup the path
- *
- * The stripping of (deleted) is a hack that could be removed
- * with an updated __d_path
- */
- do {
- tmp = root;
- deleted = d_unlinked(path->dentry);
- res = __d_path(path, &tmp, buf, buflen);
-
- } while (deleted != d_unlinked(path->dentry));
+ tmp = root;
+ res = __d_path(path, &tmp, buf, buflen);
spin_unlock(&dcache_lock);
*name = res;
@@ -98,21 +86,17 @@
*name = buf;
goto out;
}
- if (deleted) {
- /* On some filesystems, newly allocated dentries appear to the
- * security_path hooks as a deleted dentry except without an
- * inode allocated.
- *
- * Remove the appended deleted text and return as string for
- * normal mediation, or auditing. The (deleted) string is
- * guaranteed to be added in this case, so just strip it.
- */
- buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */
- if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) {
+ /* Handle two cases:
+ * 1. A deleted dentry && profile is not allowing mediation of deleted
+ * 2. On some filesystems, newly allocated dentries appear to the
+ * security_path hooks as a deleted dentry except without an inode
+ * allocated.
+ */
+ if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+ !(flags & PATH_MEDIATE_DELETED)) {
error = -ENOENT;
goto out;
- }
}
/* Determine if the path is connected to the expected root */
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 3cdc1ad..52cc865 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -1151,12 +1151,14 @@
/* released below */
ns = aa_get_namespace(root);
- write_lock(&ns->lock);
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
+ write_lock(&ns->parent->lock);
__remove_namespace(ns);
+ write_unlock(&ns->parent->lock);
} else {
/* remove profile */
+ write_lock(&ns->lock);
profile = aa_get_profile(__lookup_profile(&ns->base, name));
if (!profile) {
error = -ENOENT;
@@ -1165,8 +1167,8 @@
}
name = profile->base.hname;
__remove_profile(profile);
+ write_unlock(&ns->lock);
}
- write_unlock(&ns->lock);
/* don't fail removal if audit fails */
(void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 4a368f1..a4136c1 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -72,6 +72,7 @@
/**
* aa_task_setrlimit - test permission to set an rlimit
* @profile - profile confining the task (NOT NULL)
+ * @task - task the resource is being set on
* @resource - the resource being set
* @new_rlim - the new resource limit (NOT NULL)
*
@@ -79,18 +80,21 @@
*
* Returns: 0 or error code if setting resource failed
*/
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
- struct rlimit *new_rlim)
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim)
{
int error = 0;
- if (profile->rlimits.mask & (1 << resource) &&
- new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)
+ /* TODO: extend resource control to handle other (non current)
+ * processes. AppArmor rules currently have the implicit assumption
+ * that the task is setting the resource of the current process
+ */
+ if ((task != current->group_leader) ||
+ (profile->rlimits.mask & (1 << resource) &&
+ new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+ error = -EACCES;
- error = audit_resource(profile, resource, new_rlim->rlim_max,
- -EACCES);
-
- return error;
+ return audit_resource(profile, resource, new_rlim->rlim_max, error);
}
/**
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 16d100d3..3fbcd1d 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -35,6 +35,7 @@
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
/* set during initialization */
+extern int iint_initialized;
extern int ima_initialized;
extern int ima_used_chip;
extern char *ima_hash;
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 7625b85..afba4ae 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -22,9 +22,10 @@
RADIX_TREE(ima_iint_store, GFP_ATOMIC);
DEFINE_SPINLOCK(ima_iint_lock);
-
static struct kmem_cache *iint_cache __read_mostly;
+int iint_initialized = 0;
+
/* ima_iint_find_get - return the iint associated with an inode
*
* ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@
iint_cache =
kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
SLAB_PANIC, init_once);
+ iint_initialized = 1;
return 0;
}
security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index f936413..e662b89 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -148,12 +148,14 @@
struct ima_iint_cache *iint;
int rc;
- if (!ima_initialized || !S_ISREG(inode->i_mode))
+ if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_get(inode);
if (!iint)
return;
mutex_lock(&iint->mutex);
+ if (!ima_initialized)
+ goto out;
rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
if (rc < 0)
goto out;
@@ -213,7 +215,7 @@
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
- if (!ima_initialized || !S_ISREG(inode->i_mode))
+ if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_get(inode);
if (!iint)
@@ -230,7 +232,7 @@
{
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
- int rc;
+ int rc = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index b2b0998..60924f6 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1272,6 +1272,7 @@
keyring_r = NULL;
me = current;
+ rcu_read_lock();
write_lock_irq(&tasklist_lock);
parent = me->real_parent;
@@ -1304,7 +1305,8 @@
goto not_permitted;
/* the keyrings must have the same UID */
- if (pcred->tgcred->session_keyring->uid != mycred->euid ||
+ if ((pcred->tgcred->session_keyring &&
+ pcred->tgcred->session_keyring->uid != mycred->euid) ||
mycred->tgcred->session_keyring->uid != mycred->euid)
goto not_permitted;
@@ -1319,6 +1321,7 @@
set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
if (oldcred)
put_cred(oldcred);
return 0;
@@ -1327,6 +1330,7 @@
ret = 0;
not_permitted:
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
put_cred(cred);
return ret;
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index ef43995..c668b44 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1416,15 +1416,19 @@
const pid_t gpid = task_pid_nr(current);
static const int tomoyo_buffer_len = 4096;
char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+ pid_t ppid;
if (!buffer)
return NULL;
do_gettimeofday(&tv);
+ rcu_read_lock();
+ ppid = task_tgid_vnr(current->real_parent);
+ rcu_read_unlock();
snprintf(buffer, tomoyo_buffer_len - 1,
"#timestamp=%lu profile=%u mode=%s (global-pid=%u)"
" task={ pid=%u ppid=%u uid=%u gid=%u euid=%u"
" egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid,
- (pid_t) sys_getpid(), (pid_t) sys_getppid(),
+ task_tgid_vnr(current), ppid,
current_uid(), current_gid(), current_euid(),
current_egid(), current_suid(), current_sgid(),
current_fsuid(), current_fsgid());
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 04454cb7b..7c66bd8 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -689,9 +689,6 @@
/********** Function prototypes. **********/
-extern asmlinkage long sys_getpid(void);
-extern asmlinkage long sys_getppid(void);
-
/* Check whether the given string starts with the given keyword. */
bool tomoyo_str_starts(char **src, const char *find);
/* Get tomoyo_realpath() of current process. */
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 204af48..ac242a3 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -372,14 +372,17 @@
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_runtime *runtime;
+
+ mutex_lock(&substream->pcm->open_mutex);
+ runtime = substream->runtime;
if (!runtime) {
snd_iprintf(buffer, "closed\n");
- return;
+ goto unlock;
}
if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
snd_iprintf(buffer, "no setup\n");
- return;
+ goto unlock;
}
snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access));
snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format));
@@ -398,20 +401,25 @@
snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames);
}
#endif
+ unlock:
+ mutex_unlock(&substream->pcm->open_mutex);
}
static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_runtime *runtime;
+
+ mutex_lock(&substream->pcm->open_mutex);
+ runtime = substream->runtime;
if (!runtime) {
snd_iprintf(buffer, "closed\n");
- return;
+ goto unlock;
}
if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
snd_iprintf(buffer, "no setup\n");
- return;
+ goto unlock;
}
snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode));
snd_iprintf(buffer, "period_step: %u\n", runtime->period_step);
@@ -421,24 +429,29 @@
snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold);
snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size);
snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary);
+ unlock:
+ mutex_unlock(&substream->pcm->open_mutex);
}
static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_runtime *runtime;
struct snd_pcm_status status;
int err;
+
+ mutex_lock(&substream->pcm->open_mutex);
+ runtime = substream->runtime;
if (!runtime) {
snd_iprintf(buffer, "closed\n");
- return;
+ goto unlock;
}
memset(&status, 0, sizeof(status));
err = snd_pcm_status(substream, &status);
if (err < 0) {
snd_iprintf(buffer, "error %d\n", err);
- return;
+ goto unlock;
}
snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state));
snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid));
@@ -452,6 +465,8 @@
snd_iprintf(buffer, "-----\n");
snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr);
snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr);
+ unlock:
+ mutex_unlock(&substream->pcm->open_mutex);
}
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 134fc6c..d4eb2ef 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1992,6 +1992,8 @@
substream->ops->close(substream);
substream->hw_opened = 0;
}
+ if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
if (substream->pcm_release) {
substream->pcm_release(substream);
substream->pcm_release = NULL;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index eb68326..a7868ad 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -829,6 +829,8 @@
if (get_user(device, (int __user *)argp))
return -EFAULT;
+ if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
+ device = SNDRV_RAWMIDI_DEVICES - 1;
mutex_lock(&register_mutex);
device = device < 0 ? 0 : device + 1;
while (device < SNDRV_RAWMIDI_DEVICES) {
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index 6857122..69cd7b3 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -281,13 +281,10 @@
return 0;
_error:
- snd_seq_oss_writeq_delete(dp->writeq);
- snd_seq_oss_readq_delete(dp->readq);
snd_seq_oss_synth_cleanup(dp);
snd_seq_oss_midi_cleanup(dp);
- delete_port(dp);
delete_seq_queue(dp->queue);
- kfree(dp);
+ delete_port(dp);
return rc;
}
@@ -350,8 +347,10 @@
static int
delete_port(struct seq_oss_devinfo *dp)
{
- if (dp->port < 0)
+ if (dp->port < 0) {
+ kfree(dp);
return 0;
+ }
debug_printk(("delete_port %i\n", dp->port));
return snd_seq_event_port_detach(dp->cseq, dp->port);
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 5f3e684..91d6023 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -764,9 +764,9 @@
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
+#ifndef MSND_CLASSIC
static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
-#ifndef MSND_CLASSIC
/* Extra Peripheral Configuration (Default: Disable) */
static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
@@ -894,7 +894,11 @@
struct snd_card *card;
struct snd_msnd *chip;
- if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) {
+ if (has_isapnp(idx)
+#ifndef MSND_CLASSIC
+ || cfg[idx] == SNDRV_AUTO_PORT
+#endif
+ ) {
printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
return -ENODEV;
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3827092..1482921 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4536,7 +4536,7 @@
cfg->hp_outs--;
memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
- memmove(sequences_hp + i - 1, sequences_hp + i,
+ memmove(sequences_hp + i, sequences_hp + i + 1,
sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
}
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1053fff..34940a0 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -126,6 +126,7 @@
"{Intel, ICH10},"
"{Intel, PCH},"
"{Intel, CPT},"
+ "{Intel, PBG},"
"{Intel, SCH},"
"{ATI, SB450},"
"{ATI, SB600},"
@@ -2749,6 +2750,8 @@
{ PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
/* CPT */
{ PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
+ /* PBG */
+ { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
/* ATI SB 450/600 */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index b697fd2..10bbbaf 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3641,6 +3641,7 @@
/* Lenovo Thinkpad T61/X61 */
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
+ SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
{}
};
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 4ef5efa..488fd9a 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -972,6 +972,53 @@
{} /* terminator */
};
+/* Errata: CS4207 rev C0/C1/C2 Silicon
+ *
+ * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
+ *
+ * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
+ * may be excessive (up to an additional 200 μA), which is most easily
+ * observed while the part is being held in reset (RESET# active low).
+ *
+ * Root Cause: At initial powerup of the device, the logic that drives
+ * the clock and write enable to the S/PDIF SRC RAMs is not properly
+ * initialized.
+ * Certain random patterns will cause a steady leakage current in those
+ * RAM cells. The issue will resolve once the SRCs are used (turned on).
+ *
+ * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
+ * blocks, which will alleviate the issue.
+ */
+
+static struct hda_verb cs_errata_init_verbs[] = {
+ {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+ {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */
+
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x9999},
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+ {0x11, AC_VERB_SET_PROC_COEF, 0xa412},
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0009},
+
+ {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
+ {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */
+
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x2412},
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0000},
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
+ {0x11, AC_VERB_SET_PROC_STATE, 0x00},
+
+ {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
+ {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
+ /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+
+ {} /* terminator */
+};
+
/* SPDIF setup */
static void init_digital(struct hda_codec *codec)
{
@@ -991,6 +1038,9 @@
{
struct cs_spec *spec = codec->spec;
+ /* init_verb sequence for C0/C1/C2 errata*/
+ snd_hda_sequence_write(codec, cs_errata_init_verbs);
+
snd_hda_sequence_write(codec, cs_coef_init_verbs);
if (spec->gpio_mask) {
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 5cdb80e..972e7c4 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -116,6 +116,7 @@
unsigned int dell_vostro:1;
unsigned int ideapad:1;
unsigned int thinkpad:1;
+ unsigned int hp_laptop:1;
unsigned int ext_mic_present;
unsigned int recording;
@@ -2299,6 +2300,18 @@
}
}
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
+{
+ unsigned int present;
+
+ present = snd_hda_jack_detect(codec, 0x1b);
+ snd_printdd("CXT5066: external microphone present=%d\n", present);
+ snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+ present ? 1 : 3);
+}
+
+
/* toggle input of built-in digital mic and mic jack appropriately
order is: external mic -> dock mic -> internal mic */
static void cxt5066_thinkpad_automic(struct hda_codec *codec)
@@ -2408,6 +2421,20 @@
}
/* unsolicited event for jack sensing */
+static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
+{
+ snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
+ switch (res >> 26) {
+ case CONEXANT_HP_EVENT:
+ cxt5066_hp_automute(codec);
+ break;
+ case CONEXANT_MIC_EVENT:
+ cxt5066_hp_laptop_automic(codec);
+ break;
+ }
+}
+
+/* unsolicited event for jack sensing */
static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
{
snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
@@ -2989,6 +3016,14 @@
{ } /* end */
};
+
+static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
+ {0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
+ {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+ {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+ { } /* end */
+};
+
/* initialize jack-sensing, too */
static int cxt5066_init(struct hda_codec *codec)
{
@@ -3004,6 +3039,8 @@
cxt5066_ideapad_automic(codec);
else if (spec->thinkpad)
cxt5066_thinkpad_automic(codec);
+ else if (spec->hp_laptop)
+ cxt5066_hp_laptop_automic(codec);
}
cxt5066_set_mic_boost(codec);
return 0;
@@ -3031,6 +3068,7 @@
CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */
CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */
CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */
+ CXT5066_HP_LAPTOP, /* HP Laptop */
CXT5066_MODELS
};
@@ -3041,6 +3079,7 @@
[CXT5066_DELL_VOSTO] = "dell-vostro",
[CXT5066_IDEAPAD] = "ideapad",
[CXT5066_THINKPAD] = "thinkpad",
+ [CXT5066_HP_LAPTOP] = "hp-laptop",
};
static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -3052,8 +3091,11 @@
SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
+ SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
@@ -3116,6 +3158,23 @@
spec->num_init_verbs++;
spec->dell_automute = 1;
break;
+ case CXT5066_HP_LAPTOP:
+ codec->patch_ops.init = cxt5066_init;
+ codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+ spec->init_verbs[spec->num_init_verbs] =
+ cxt5066_init_verbs_hp_laptop;
+ spec->num_init_verbs++;
+ spec->hp_laptop = 1;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+ /* no S/PDIF out */
+ spec->multiout.dig_out_nid = 0;
+ /* input source automatically selected */
+ spec->input_mux = NULL;
+ spec->port_d_mode = 0;
+ spec->mic_boost = 3; /* default 30dB gain */
+ break;
+
case CXT5066_OLPC_XO_1_5:
codec->patch_ops.init = cxt5066_olpc_init;
codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index 69b950d..baa108b 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -84,7 +84,7 @@
#else
/* support all rates and formats */
#define SUPPORTED_RATES \
- (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
SNDRV_PCM_RATE_192000)
#define SUPPORTED_MAXBPS 24
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 627bf99..a432e6e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1594,12 +1594,22 @@
}
if (spec->autocfg.dig_in_pin) {
- hda_nid_t dig_nid;
- err = snd_hda_get_connections(codec,
- spec->autocfg.dig_in_pin,
- &dig_nid, 1);
- if (err > 0)
- spec->dig_in_nid = dig_nid;
+ dig_nid = codec->start_nid;
+ for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
+ unsigned int wcaps = get_wcaps(codec, dig_nid);
+ if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
+ continue;
+ if (!(wcaps & AC_WCAP_DIGITAL))
+ continue;
+ if (!(wcaps & AC_WCAP_CONN_LIST))
+ continue;
+ err = get_connection_index(codec, dig_nid,
+ spec->autocfg.dig_in_pin);
+ if (err >= 0) {
+ spec->dig_in_nid = dig_nid;
+ break;
+ }
+ }
}
}
@@ -5334,6 +5344,7 @@
static struct snd_pci_quirk beep_white_list[] = {
SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
{}
};
@@ -14452,6 +14463,7 @@
enum {
ALC269_FIXUP_SONY_VAIO,
+ ALC269_FIXUP_DELL_M101Z,
};
static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
@@ -14463,11 +14475,20 @@
[ALC269_FIXUP_SONY_VAIO] = {
.verbs = alc269_sony_vaio_fixup_verbs
},
+ [ALC269_FIXUP_DELL_M101Z] = {
+ .verbs = (const struct hda_verb[]) {
+ /* Enables internal speaker */
+ {0x20, AC_VERB_SET_COEF_INDEX, 13},
+ {0x20, AC_VERB_SET_PROC_COEF, 0x4040},
+ {}
+ }
+ },
};
static struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
{}
};
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 289cb4d..6c0a11a 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -543,6 +543,10 @@
chip->model.suspend = claro_suspend;
chip->model.resume = claro_resume;
chip->model.set_adc_params = set_ak5385_params;
+ chip->model.device_config = PLAYBACK_0_TO_I2S |
+ PLAYBACK_1_TO_SPDIF |
+ CAPTURE_0_FROM_I2S_2 |
+ CAPTURE_1_FROM_SPDIF;
break;
}
if (id->driver_data == MODEL_MERIDIAN ||
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 6147216..a3409ed 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -155,6 +155,7 @@
int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
int oxygen_pci_resume(struct pci_dev *pci);
#endif
+void oxygen_pci_shutdown(struct pci_dev *pci);
/* oxygen_mixer.c */
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index fad03d6..7e93cf8 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -519,16 +519,21 @@
}
}
-static void oxygen_card_free(struct snd_card *card)
+static void oxygen_shutdown(struct oxygen *chip)
{
- struct oxygen *chip = card->private_data;
-
spin_lock_irq(&chip->reg_lock);
chip->interrupt_mask = 0;
chip->pcm_running = 0;
oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
spin_unlock_irq(&chip->reg_lock);
+}
+
+static void oxygen_card_free(struct snd_card *card)
+{
+ struct oxygen *chip = card->private_data;
+
+ oxygen_shutdown(chip);
if (chip->irq >= 0)
free_irq(chip->irq, chip);
flush_scheduled_work();
@@ -778,3 +783,13 @@
}
EXPORT_SYMBOL(oxygen_pci_resume);
#endif /* CONFIG_PM */
+
+void oxygen_pci_shutdown(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct oxygen *chip = card->private_data;
+
+ oxygen_shutdown(chip);
+ chip->model.cleanup(chip);
+}
+EXPORT_SYMBOL(oxygen_pci_shutdown);
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index f03a2f2..06c863e 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -95,6 +95,7 @@
.suspend = oxygen_pci_suspend,
.resume = oxygen_pci_resume,
#endif
+ .shutdown = oxygen_pci_shutdown,
};
static int __init alsa_card_xonar_init(void)
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index dbc4b89..b82c1cf 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -53,6 +53,8 @@
struct xonar_generic generic;
u16 wm8776_regs[0x17];
u16 wm8766_regs[0x10];
+ struct snd_kcontrol *line_adcmux_control;
+ struct snd_kcontrol *mic_adcmux_control;
struct snd_kcontrol *lc_controls[13];
};
@@ -193,6 +195,7 @@
static void xonar_ds_cleanup(struct oxygen *chip)
{
xonar_disable_output(chip);
+ wm8776_write(chip, WM8776_RESET, 0);
}
static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@
{
struct oxygen *chip = ctl->private_data;
struct xonar_wm87x6 *data = chip->model_data;
+ struct snd_kcontrol *other_ctl;
unsigned int mux_bit = ctl->private_value;
u16 reg;
int changed;
@@ -610,8 +614,18 @@
mutex_lock(&chip->mutex);
reg = data->wm8776_regs[WM8776_ADCMUX];
if (value->value.integer.value[0]) {
- reg &= ~0x003;
reg |= mux_bit;
+ /* line-in and mic-in are exclusive */
+ mux_bit ^= 3;
+ if (reg & mux_bit) {
+ reg &= ~mux_bit;
+ if (mux_bit == 1)
+ other_ctl = data->line_adcmux_control;
+ else
+ other_ctl = data->mic_adcmux_control;
+ snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &other_ctl->id);
+ }
} else
reg &= ~mux_bit;
changed = reg != data->wm8776_regs[WM8776_ADCMUX];
@@ -963,7 +977,13 @@
err = snd_ctl_add(chip->card, ctl);
if (err < 0)
return err;
+ if (!strcmp(ctl->id.name, "Line Capture Switch"))
+ data->line_adcmux_control = ctl;
+ else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
+ data->mic_adcmux_control = ctl;
}
+ if (!data->line_adcmux_control || !data->mic_adcmux_control)
+ return -ENXIO;
BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
ctl = snd_ctl_new1(&lc_controls[i], chip);
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index b92adef..d6fa7bf 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4609,6 +4609,7 @@
if (err < 0)
return err;
+ memset(&info, 0, sizeof(info));
spin_lock_irqsave(&hdsp->lock, flags);
info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 547b713..0c98ef9 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4127,6 +4127,7 @@
case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
+ memset(&info, 0, sizeof(info));
spin_lock_irq(&hdspm->lock);
info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 2f12da4..581a670 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -579,7 +579,7 @@
rate * delay_ms / 1000)
* substream->runtime->channels;
- pr_debug(KERN_ERR "%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
+ pr_debug("%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
__func__,
delay_ms,
rate,
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/s3c24xx/s3c-dma.c
index 1b61c23..f1b1bc4 100644
--- a/sound/soc/s3c24xx/s3c-dma.c
+++ b/sound/soc/s3c24xx/s3c-dma.c
@@ -94,8 +94,7 @@
if ((pos + len) > prtd->dma_end) {
len = prtd->dma_end - pos;
- pr_debug(KERN_DEBUG "%s: corrected dma len %ld\n",
- __func__, len);
+ pr_debug("%s: corrected dma len %ld\n", __func__, len);
}
ret = s3c2410_dma_enqueue(prtd->params->channel,
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index b823a5c..87e2b7fc 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -12,6 +12,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <cpu/sh7722.h>
@@ -40,12 +41,12 @@
};
static struct clk siumckb_clk = {
- .name = "siumckb_clk",
- .id = -1,
.ops = &siumckb_clk_ops,
.rate = 0, /* initialised at run-time */
};
+static struct clk_lookup *siumckb_lookup;
+
static int migor_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -180,6 +181,13 @@
if (ret < 0)
return ret;
+ siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
+ if (!siumckb_lookup) {
+ ret = -ENOMEM;
+ goto eclkdevalloc;
+ }
+ clkdev_add(siumckb_lookup);
+
/* Port number used on this machine: port B */
migor_snd_device = platform_device_alloc("soc-audio", 1);
if (!migor_snd_device) {
@@ -200,12 +208,15 @@
epdevadd:
platform_device_put(migor_snd_device);
epdevalloc:
+ clkdev_drop(siumckb_lookup);
+eclkdevalloc:
clk_unregister(&siumckb_clk);
return ret;
}
static void __exit migor_exit(void)
{
+ clkdev_drop(siumckb_lookup);
clk_unregister(&siumckb_clk);
platform_device_unregister(migor_snd_device);
}
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index adbc68c..f6b0d28 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -203,8 +203,9 @@
data[1] = (value >> 8) & 0xff;
data[2] = value & 0xff;
- if (!snd_soc_codec_volatile_register(codec, reg))
- reg_cache[reg] = value;
+ if (!snd_soc_codec_volatile_register(codec, reg)
+ && reg < codec->reg_cache_size)
+ reg_cache[reg] = value;
if (codec->cache_only) {
codec->cache_sync = 1;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 9feb00c..4eabafa 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -126,7 +126,7 @@
for (idx = 0; idx < 2; idx++) {
subs = &as->substream[idx];
if (!subs->num_formats)
- return;
+ continue;
snd_usb_release_substream_urbs(subs, 1);
subs->interface = -1;
}
@@ -216,6 +216,11 @@
}
switch (protocol) {
+ default:
+ snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
+ protocol);
+ /* fall through */
+
case UAC_VERSION_1: {
struct uac1_ac_header_descriptor *h1 = control_header;
@@ -253,10 +258,6 @@
break;
}
-
- default:
- snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
- return -EINVAL;
}
return 0;
@@ -465,7 +466,13 @@
goto __error;
}
- chip->ctrl_intf = alts;
+ /*
+ * For devices with more than one control interface, we assume the
+ * first contains the audio controls. We might need a more specific
+ * check here in the future.
+ */
+ if (!chip->ctrl_intf)
+ chip->ctrl_intf = alts;
if (err > 0) {
/* create normal USB audio interfaces */
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index b853f8d..7754a10 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -295,12 +295,11 @@
switch (altsd->bInterfaceProtocol) {
case UAC_VERSION_1:
+ default:
return set_sample_rate_v1(chip, iface, alts, fmt, rate);
case UAC_VERSION_2:
return set_sample_rate_v2(chip, iface, alts, fmt, rate);
}
-
- return -EINVAL;
}
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 1a701f1..ef0a07e 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -275,6 +275,12 @@
/* get audio formats */
switch (protocol) {
+ default:
+ snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
+ dev->devnum, iface_no, altno, protocol);
+ protocol = UAC_VERSION_1;
+ /* fall through */
+
case UAC_VERSION_1: {
struct uac1_as_header_descriptor *as =
snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -336,11 +342,6 @@
dev->devnum, iface_no, altno, as->bTerminalLink);
continue;
}
-
- default:
- snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
- dev->devnum, iface_no, altno, protocol);
- continue;
}
/* get format type */
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 3a13754..6914821 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -49,7 +49,8 @@
u64 pcm_formats;
switch (protocol) {
- case UAC_VERSION_1: {
+ case UAC_VERSION_1:
+ default: {
struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
sample_width = fmt->bBitResolution;
sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@
format <<= 1;
break;
}
-
- default:
- return -EINVAL;
}
pcm_formats = 0;
@@ -384,6 +382,10 @@
* audio class v2 uses class specific EP0 range requests for that.
*/
switch (protocol) {
+ default:
+ snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+ chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+ /* fall through */
case UAC_VERSION_1:
fp->channels = fmt->bNrChannels;
ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -392,10 +394,6 @@
/* fp->channels is already set in this case */
ret = parse_audio_format_rates_v2(chip, fp);
break;
- default:
- snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
- chip->dev->devnum, fp->iface, fp->altsetting, protocol);
- return -EINVAL;
}
if (fp->channels < 1) {
@@ -438,6 +436,10 @@
fp->channels = 1;
switch (protocol) {
+ default:
+ snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+ chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+ /* fall through */
case UAC_VERSION_1: {
struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
brate = le16_to_cpu(fmt->wMaxBitRate);
@@ -456,10 +458,6 @@
ret = parse_audio_format_rates_v2(chip, fp);
break;
}
- default:
- snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
- chip->dev->devnum, fp->iface, fp->altsetting, protocol);
- return -EINVAL;
}
return ret;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c166db0..3ed3901 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2175,7 +2175,15 @@
}
host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
- mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol;
+ switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
+ case UAC_VERSION_1:
+ default:
+ mixer->protocol = UAC_VERSION_1;
+ break;
+ case UAC_VERSION_2:
+ mixer->protocol = UAC_VERSION_2;
+ break;
+ }
if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
(err = snd_usb_mixer_status_create(mixer)) < 0)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 3634ced..3b5135c 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -173,13 +173,12 @@
switch (altsd->bInterfaceProtocol) {
case UAC_VERSION_1:
+ default:
return init_pitch_v1(chip, iface, alts, fmt);
case UAC_VERSION_2:
return init_pitch_v2(chip, iface, alts, fmt);
}
-
- return -EINVAL;
}
/*
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 624a96c..6de4313 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -50,6 +50,7 @@
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->val);
+ node->children_hit = 0;
node->parent = NULL;
node->hit = 0;
}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index e72f05c..fcc16e4 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1539,6 +1539,7 @@
goto error;
}
tev->point.offset = pev->point.offset;
+ tev->point.retprobe = pev->point.retprobe;
tev->nargs = pev->nargs;
if (tev->nargs) {
tev->args = zalloc(sizeof(struct probe_trace_arg)
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 5251366..32b81f7 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -686,6 +686,25 @@
char buf[32], *ptr;
int ret, nscopes;
+ if (!is_c_varname(pf->pvar->var)) {
+ /* Copy raw parameters */
+ pf->tvar->value = strdup(pf->pvar->var);
+ if (pf->tvar->value == NULL)
+ return -ENOMEM;
+ if (pf->pvar->type) {
+ pf->tvar->type = strdup(pf->pvar->type);
+ if (pf->tvar->type == NULL)
+ return -ENOMEM;
+ }
+ if (pf->pvar->name) {
+ pf->tvar->name = strdup(pf->pvar->name);
+ if (pf->tvar->name == NULL)
+ return -ENOMEM;
+ } else
+ pf->tvar->name = NULL;
+ return 0;
+ }
+
if (pf->pvar->name)
pf->tvar->name = strdup(pf->pvar->name);
else {
@@ -700,19 +719,6 @@
if (pf->tvar->name == NULL)
return -ENOMEM;
- if (!is_c_varname(pf->pvar->var)) {
- /* Copy raw parameters */
- pf->tvar->value = strdup(pf->pvar->var);
- if (pf->tvar->value == NULL)
- return -ENOMEM;
- if (pf->pvar->type) {
- pf->tvar->type = strdup(pf->pvar->type);
- if (pf->tvar->type == NULL)
- return -ENOMEM;
- }
- return 0;
- }
-
pr_debug("Searching '%s' variable in context.\n",
pf->pvar->var);
/* Search child die for local variables and parameters. */
@@ -783,6 +789,16 @@
/* This function has no name. */
tev->point.offset = (unsigned long)pf->addr;
+ /* Return probe must be on the head of a subprogram */
+ if (pf->pev->point.retprobe) {
+ if (tev->point.offset != 0) {
+ pr_warning("Return probe must be on the head of"
+ " a real function\n");
+ return -EINVAL;
+ }
+ tev->point.retprobe = true;
+ }
+
pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
tev->point.offset);
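
[editor's note] The probe-finder.c hunks above do two things: copy raw (non-C-variable) parameters verbatim before the DWARF lookup, and refuse a return probe that is not planted at offset 0 of a function. A tiny self-contained model of the second check (struct probe_point and apply_retprobe() are hypothetical stand-ins for perf's real types):

    #include <errno.h>
    #include <stdbool.h>

    /* hypothetical trimmed-down model of the new sanity check */
    struct probe_point {
        unsigned long offset;
        bool retprobe;
    };

    static int apply_retprobe(struct probe_point *tp, bool want_retprobe)
    {
        if (want_retprobe) {
            /* a return probe has to sit at the function entry (offset 0) */
            if (tp->offset != 0)
                return -EINVAL;
            tp->retprobe = true;
        }
        return 0;
    }

    int main(void)
    {
        struct probe_point tp = { .offset = 16, .retprobe = false };
        return apply_retprobe(&tp, true) == -EINVAL ? 0 : 1;
    }

Since kretprobe-based return probes are armed at function entry, a non-zero offset cannot work, which is why the hunk rejects it with -EINVAL and a warning.
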
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1a36773..b2f5ae9 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2268,6 +2268,9 @@
int symbol__init(void)
{
+ if (symbol_conf.initialized)
+ return 0;
+
elf_version(EV_CURRENT);
if (symbol_conf.sort_by_name)
symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
@@ -2293,6 +2296,7 @@
symbol_conf.sym_list_str, "symbol") < 0)
goto out_free_comm_list;
+ symbol_conf.initialized = true;
return 0;
out_free_dso_list:
@@ -2304,11 +2308,14 @@
void symbol__exit(void)
{
+ if (!symbol_conf.initialized)
+ return;
strlist__delete(symbol_conf.sym_list);
strlist__delete(symbol_conf.dso_list);
strlist__delete(symbol_conf.comm_list);
vmlinux_path__exit();
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
+ symbol_conf.initialized = false;
}
int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index b7a8da4..ea95c27 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -69,7 +69,8 @@
show_nr_samples,
use_callchain,
exclude_other,
- show_cpu_utilization;
+ show_cpu_utilization,
+ initialized;
const char *vmlinux_name,
*source_prefix,
*field_sep;
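
[editor's note] The symbol.c/symbol.h hunks above make symbol__init()/symbol__exit() idempotent by tracking an initialized flag in symbol_conf. A minimal compilable model of the same guard, using hypothetical names (conf, subsystem_init(), subsystem_exit()):

    #include <stdbool.h>

    /* hypothetical model of the symbol_conf.initialized guard */
    static struct {
        bool initialized;
        /* ... the real struct carries lists, strings, sizes ... */
    } conf;

    static int subsystem_init(void)
    {
        if (conf.initialized)      /* a second call becomes a harmless no-op */
            return 0;
        /* ... allocate lists, parse filter options ... */
        conf.initialized = true;
        return 0;
    }

    static void subsystem_exit(void)
    {
        if (!conf.initialized)     /* never free what was never set up */
            return;
        /* ... free lists, reset pointers ... */
        conf.initialized = false;
    }

    int main(void)
    {
        subsystem_init();
        subsystem_init();          /* safe: second init is ignored */
        subsystem_exit();
        subsystem_exit();          /* safe: second exit is ignored */
        return 0;
    }

Repeated init calls return early and repeated exits are ignored, so callers no longer need to track whether the subsystem has already been set up or torn down.
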
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 66cf65b..c1f1e3c 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -218,7 +218,6 @@
events = file->f_op->poll(file, &irqfd->pt);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
- spin_unlock_irq(&kvm->irqfds.lock);
/*
* Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@
if (events & POLLIN)
schedule_work(&irqfd->inject);
+ spin_unlock_irq(&kvm->irqfds.lock);
+
/*
* do not drop the file until the irqfd is fully initialized, otherwise
* we might race against the POLLHUP
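
[editor's note] The eventfd.c hunk above moves spin_unlock_irq() below the schedule_work() call, so injection of an already-pending event is queued while irqfds.lock is still held. A simplified pthread model of that ordering (struct item, register_item() and schedule_inject() are made-up stand-ins, and a plain mutex replaces the kernel spinlock; build with -pthread):

    #include <pthread.h>
    #include <stdbool.h>

    /* hypothetical single-lock model of the reordering: the "pending event"
     * work is queued while the lock is still held, so a concurrent teardown
     * cannot slip in between the poll and the scheduling */
    struct item {
        bool queued;
    };

    static pthread_mutex_t items_lock = PTHREAD_MUTEX_INITIALIZER;

    static void schedule_inject(struct item *it)
    {
        it->queued = true;    /* stands in for schedule_work(&irqfd->inject) */
    }

    static void register_item(struct item *it, bool event_already_pending)
    {
        pthread_mutex_lock(&items_lock);
        /* ... add to the list, poll for already-signalled events ... */
        if (event_already_pending)
            schedule_inject(it);
        pthread_mutex_unlock(&items_lock);   /* only now may teardown proceed */
    }

    int main(void)
    {
        struct item it = { false };
        register_item(&it, true);
        return it.queued ? 0 : 1;
    }

The point of keeping the scheduling inside the locked region appears to be closing the window in which a concurrent deassign could tear the irqfd down between the poll and the work submission.
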
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b78b794..5186e72 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1958,10 +1958,10 @@
cpu);
hardware_disable(NULL);
break;
- case CPU_ONLINE:
+ case CPU_STARTING:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
- smp_call_function_single(cpu, hardware_enable, NULL, 1);
+ hardware_enable(NULL);
break;
}
return NOTIFY_OK;
@@ -1970,10 +1970,12 @@
asmlinkage void kvm_handle_fault_on_reboot(void)
{
- if (kvm_rebooting)
+ if (kvm_rebooting) {
/* spin while reset goes on */
+ local_irq_enable();
while (true)
;
+ }
/* Fault while not rebooting. We want the trace. */
BUG();
}
@@ -2096,7 +2098,6 @@
static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
- .priority = 20, /* must be > scheduler priority */
};
static int vm_stat_get(void *_offset, u64 *val)
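
[editor's note] The kvm_main.c hunks switch the hotplug callback from CPU_ONLINE to CPU_STARTING, which is delivered on the CPU that is coming up, so hardware_enable() can be called directly instead of through smp_call_function_single(); the same file also enables interrupts before the fault-on-reboot spin loop (presumably so the spinning CPU can still take the reboot IPI) and drops the explicit notifier priority. A compilable userspace sketch of the resulting callback shape (the constants, cpu_hotplug_cb() and the enable/disable helpers are stand-ins, not the kernel's real notifier API):

    #include <stdio.h>

    /* userspace stand-ins so the sketch compiles; the real code uses the
     * kernel's notifier_block machinery and hardware_enable/disable helpers */
    enum { CPU_STARTING = 1, CPU_DYING = 2, NOTIFY_OK = 0 };

    static void hardware_enable(void *junk)  { (void)junk; puts("enable VMX/SVM here"); }
    static void hardware_disable(void *junk) { (void)junk; puts("disable VMX/SVM here"); }

    /* CPU_STARTING is delivered on the CPU that is coming up, with interrupts
     * disabled, so the hardware can be enabled in place - no cross-CPU IPI */
    static int cpu_hotplug_cb(unsigned long action, unsigned int cpu)
    {
        switch (action) {
        case CPU_STARTING:
            printf("kvm: enabling virtualization on CPU%u\n", cpu);
            hardware_enable(NULL);
            break;
        case CPU_DYING:
            printf("kvm: disabling virtualization on CPU%u\n", cpu);
            hardware_disable(NULL);
            break;
        }
        return NOTIFY_OK;
    }

    int main(void)
    {
        return cpu_hotplug_cb(CPU_STARTING, 0) == NOTIFY_OK ? 0 : 1;
    }
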