From 16423d67936f87e320a7b11771675b982cc9de02 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 15 Jul 2014 13:08:36 +0300 Subject: Update MAINTAINERS and CREDITS files with amdkfd info v6: Update entries to reflect new name & location of driver Signed-off-by: Oded Gabbay --- MAINTAINERS | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ea4d0058fd1b..3c69a3c73028 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -618,6 +618,16 @@ S: Maintained F: drivers/iommu/amd_iommu*.[ch] F: include/linux/amd-iommu.h +AMD KFD +M: Oded Gabbay +L: dri-devel@lists.freedesktop.org +T: git git://people.freedesktop.org/~gabbayo/linux.git +S: Supported +F: drivers/gpu/drm/amd/amdkfd/ +F: drivers/gpu/drm/radeon/radeon_kfd.c +F: drivers/gpu/drm/radeon/radeon_kfd.h +F: include/uapi/linux/kfd_ioctl.h + AMD MICROCODE UPDATE SUPPORT M: Andreas Herrmann L: amd64-microcode@amd64.org -- cgit v1.2.3-59-g8ed1b From 214e0aed639ef40987bf6159fad303171a6de31e Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 30 Jul 2014 13:41:55 -0700 Subject: locking/Documentation: Move locking related docs into Documentation/locking/ Specifically: Documentation/locking/lockdep-design.txt Documentation/locking/lockstat.txt Documentation/locking/mutex-design.txt Documentation/locking/rt-mutex-design.txt Documentation/locking/rt-mutex.txt Documentation/locking/spinlocks.txt Documentation/locking/ww-mutex-design.txt Signed-off-by: Davidlohr Bueso Acked-by: Randy Dunlap Signed-off-by: Peter Zijlstra Cc: jason.low2@hp.com Cc: aswin@hp.com Cc: Alexei Starovoitov Cc: Al Viro Cc: Andrew Morton Cc: Chris Mason Cc: Dan Streetman Cc: David Airlie Cc: Davidlohr Bueso Cc: David S. Miller Cc: Greg Kroah-Hartman Cc: Heiko Carstens Cc: Jason Low Cc: Josef Bacik Cc: Kees Cook Cc: Linus Torvalds Cc: Lubomir Rintel Cc: Masanari Iida Cc: Paul E. 
McKenney Cc: Randy Dunlap Cc: Tim Chen Cc: Vineet Gupta Cc: fengguang.wu@intel.com Link: http://lkml.kernel.org/r/1406752916-3341-6-git-send-email-davidlohr@hp.com Signed-off-by: Ingo Molnar --- Documentation/00-INDEX | 2 + Documentation/DocBook/kernel-locking.tmpl | 2 +- Documentation/lockdep-design.txt | 286 ----------- Documentation/locking/lockdep-design.txt | 286 +++++++++++ Documentation/locking/lockstat.txt | 178 +++++++ Documentation/locking/mutex-design.txt | 157 ++++++ Documentation/locking/rt-mutex-design.txt | 781 ++++++++++++++++++++++++++++++ Documentation/locking/rt-mutex.txt | 79 +++ Documentation/locking/spinlocks.txt | 167 +++++++ Documentation/locking/ww-mutex-design.txt | 344 +++++++++++++ Documentation/lockstat.txt | 178 ------- Documentation/mutex-design.txt | 157 ------ Documentation/rt-mutex-design.txt | 781 ------------------------------ Documentation/rt-mutex.txt | 79 --- Documentation/spinlocks.txt | 167 ------- Documentation/ww-mutex-design.txt | 344 ------------- MAINTAINERS | 4 +- drivers/gpu/drm/drm_modeset_lock.c | 2 +- include/linux/lockdep.h | 2 +- include/linux/mutex.h | 2 +- include/linux/rwsem.h | 2 +- kernel/locking/mutex.c | 2 +- kernel/locking/rtmutex.c | 2 +- lib/Kconfig.debug | 4 +- 24 files changed, 2005 insertions(+), 2003 deletions(-) delete mode 100644 Documentation/lockdep-design.txt create mode 100644 Documentation/locking/lockdep-design.txt create mode 100644 Documentation/locking/lockstat.txt create mode 100644 Documentation/locking/mutex-design.txt create mode 100644 Documentation/locking/rt-mutex-design.txt create mode 100644 Documentation/locking/rt-mutex.txt create mode 100644 Documentation/locking/spinlocks.txt create mode 100644 Documentation/locking/ww-mutex-design.txt delete mode 100644 Documentation/lockstat.txt delete mode 100644 Documentation/mutex-design.txt delete mode 100644 Documentation/rt-mutex-design.txt delete mode 100644 Documentation/rt-mutex.txt delete mode 100644 Documentation/spinlocks.txt delete mode 100644 Documentation/ww-mutex-design.txt (limited to 'MAINTAINERS') diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index 27e67a98b7be..1750fcef1ab4 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX @@ -287,6 +287,8 @@ local_ops.txt - semantics and behavior of local atomic operations. lockdep-design.txt - documentation on the runtime locking correctness validator. +locking/ + - directory with info about kernel locking primitives lockstat.txt - info on collecting statistics on locks (and contention). lockup-watchdogs.txt diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl index e584ee12a1e7..7c9cc4846cb6 100644 --- a/Documentation/DocBook/kernel-locking.tmpl +++ b/Documentation/DocBook/kernel-locking.tmpl @@ -1972,7 +1972,7 @@ machines due to caching. - Documentation/spinlocks.txt: + Documentation/locking/spinlocks.txt: Linus Torvalds' spinlocking tutorial in the kernel sources. diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt deleted file mode 100644 index 5dbc99c04f6e..000000000000 --- a/Documentation/lockdep-design.txt +++ /dev/null @@ -1,286 +0,0 @@ -Runtime locking correctness validator -===================================== - -started by Ingo Molnar -additions by Arjan van de Ven - -Lock-class ----------- - -The basic object the validator operates upon is a 'class' of locks. 
- -A class of locks is a group of locks that are logically the same with -respect to locking rules, even if the locks may have multiple (possibly -tens of thousands of) instantiations. For example a lock in the inode -struct is one class, while each inode has its own instantiation of that -lock class. - -The validator tracks the 'state' of lock-classes, and it tracks -dependencies between different lock-classes. The validator maintains a -rolling proof that the state and the dependencies are correct. - -Unlike an lock instantiation, the lock-class itself never goes away: when -a lock-class is used for the first time after bootup it gets registered, -and all subsequent uses of that lock-class will be attached to this -lock-class. - -State ------ - -The validator tracks lock-class usage history into 4n + 1 separate state bits: - -- 'ever held in STATE context' -- 'ever held as readlock in STATE context' -- 'ever held with STATE enabled' -- 'ever held as readlock with STATE enabled' - -Where STATE can be either one of (kernel/lockdep_states.h) - - hardirq - - softirq - - reclaim_fs - -- 'ever used' [ == !unused ] - -When locking rules are violated, these state bits are presented in the -locking error messages, inside curlies. A contrived example: - - modprobe/2287 is trying to acquire lock: - (&sio_locks[i].lock){-.-...}, at: [] mutex_lock+0x21/0x24 - - but task is already holding lock: - (&sio_locks[i].lock){-.-...}, at: [] mutex_lock+0x21/0x24 - - -The bit position indicates STATE, STATE-read, for each of the states listed -above, and the character displayed in each indicates: - - '.' acquired while irqs disabled and not in irq context - '-' acquired in irq context - '+' acquired with irqs enabled - '?' acquired in irq context with irqs enabled. - -Unused mutexes cannot be part of the cause of an error. - - -Single-lock state rules: ------------------------- - -A softirq-unsafe lock-class is automatically hardirq-unsafe as well. The -following states are exclusive, and only one of them is allowed to be -set for any lock-class: - - and - and - -The validator detects and reports lock usage that violate these -single-lock state rules. - -Multi-lock dependency rules: ----------------------------- - -The same lock-class must not be acquired twice, because this could lead -to lock recursion deadlocks. - -Furthermore, two locks may not be taken in different order: - - -> - -> - -because this could lead to lock inversion deadlocks. (The validator -finds such dependencies in arbitrary complexity, i.e. there can be any -other locking sequence between the acquire-lock operations, the -validator will still track all dependencies between locks.) - -Furthermore, the following usage based lock dependencies are not allowed -between any two lock-classes: - - -> - -> - -The first rule comes from the fact the a hardirq-safe lock could be -taken by a hardirq context, interrupting a hardirq-unsafe lock - and -thus could result in a lock inversion deadlock. Likewise, a softirq-safe -lock could be taken by an softirq context, interrupting a softirq-unsafe -lock. - -The above rules are enforced for any locking sequence that occurs in the -kernel: when acquiring a new lock, the validator checks whether there is -any rule violation between the new lock and any of the held locks. - -When a lock-class changes its state, the following aspects of the above -dependency rules are enforced: - -- if a new hardirq-safe lock is discovered, we check whether it - took any hardirq-unsafe lock in the past. 
- -- if a new softirq-safe lock is discovered, we check whether it took - any softirq-unsafe lock in the past. - -- if a new hardirq-unsafe lock is discovered, we check whether any - hardirq-safe lock took it in the past. - -- if a new softirq-unsafe lock is discovered, we check whether any - softirq-safe lock took it in the past. - -(Again, we do these checks too on the basis that an interrupt context -could interrupt _any_ of the irq-unsafe or hardirq-unsafe locks, which -could lead to a lock inversion deadlock - even if that lock scenario did -not trigger in practice yet.) - -Exception: Nested data dependencies leading to nested locking -------------------------------------------------------------- - -There are a few cases where the Linux kernel acquires more than one -instance of the same lock-class. Such cases typically happen when there -is some sort of hierarchy within objects of the same type. In these -cases there is an inherent "natural" ordering between the two objects -(defined by the properties of the hierarchy), and the kernel grabs the -locks in this fixed order on each of the objects. - -An example of such an object hierarchy that results in "nested locking" -is that of a "whole disk" block-dev object and a "partition" block-dev -object; the partition is "part of" the whole device and as long as one -always takes the whole disk lock as a higher lock than the partition -lock, the lock ordering is fully correct. The validator does not -automatically detect this natural ordering, as the locking rule behind -the ordering is not static. - -In order to teach the validator about this correct usage model, new -versions of the various locking primitives were added that allow you to -specify a "nesting level". An example call, for the block device mutex, -looks like this: - -enum bdev_bd_mutex_lock_class -{ - BD_MUTEX_NORMAL, - BD_MUTEX_WHOLE, - BD_MUTEX_PARTITION -}; - - mutex_lock_nested(&bdev->bd_contains->bd_mutex, BD_MUTEX_PARTITION); - -In this case the locking is done on a bdev object that is known to be a -partition. - -The validator treats a lock that is taken in such a nested fashion as a -separate (sub)class for the purposes of validation. - -Note: When changing code to use the _nested() primitives, be careful and -check really thoroughly that the hierarchy is correctly mapped; otherwise -you can get false positives or false negatives. - -Proof of 100% correctness: --------------------------- - -The validator achieves perfect, mathematical 'closure' (proof of locking -correctness) in the sense that for every simple, standalone single-task -locking sequence that occurred at least once during the lifetime of the -kernel, the validator proves it with a 100% certainty that no -combination and timing of these locking sequences can cause any class of -lock related deadlock. [*] - -I.e. complex multi-CPU and multi-task locking scenarios do not have to -occur in practice to prove a deadlock: only the simple 'component' -locking chains have to occur at least once (anytime, in any -task/context) for the validator to be able to prove correctness. (For -example, complex deadlocks that would normally need more than 3 CPUs and -a very unlikely constellation of tasks, irq-contexts and timings to -occur, can be detected on a plain, lightly loaded single-CPU system as -well!) 
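
To make the irq-safety rules above concrete, here is a contrived,
hypothetical fragment (not taken from this document or patch) that
creates the forbidden <hardirq-safe> -> <hardirq-unsafe> dependency;
the validator reports it even if the deadly interrupt timing never
occurs during the test run:

/* lock_a is also taken from a hardirq handler (hardirq-safe), while
 * lock_b is taken elsewhere with irqs enabled (hardirq-unsafe).
 * Taking lock_b while holding lock_a therefore builds the forbidden
 * <hardirq-safe> -> <hardirq-unsafe> dependency described above.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        spin_lock(&lock_a);             /* lock_a becomes hardirq-safe */
        spin_unlock(&lock_a);
        return IRQ_HANDLED;
}

static void demo_process_context(void)
{
        spin_lock(&lock_b);             /* hardirq-unsafe: irqs still enabled */
        spin_unlock(&lock_b);

        spin_lock_irq(&lock_a);
        spin_lock(&lock_b);             /* hardirq-safe -> hardirq-unsafe */
        spin_unlock(&lock_b);
        spin_unlock_irq(&lock_a);
}
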
- -This radically decreases the complexity of locking related QA of the -kernel: what has to be done during QA is to trigger as many "simple" -single-task locking dependencies in the kernel as possible, at least -once, to prove locking correctness - instead of having to trigger every -possible combination of locking interaction between CPUs, combined with -every possible hardirq and softirq nesting scenario (which is impossible -to do in practice). - -[*] assuming that the validator itself is 100% correct, and no other - part of the system corrupts the state of the validator in any way. - We also assume that all NMI/SMM paths [which could interrupt - even hardirq-disabled codepaths] are correct and do not interfere - with the validator. We also assume that the 64-bit 'chain hash' - value is unique for every lock-chain in the system. Also, lock - recursion must not be higher than 20. - -Performance: ------------- - -The above rules require _massive_ amounts of runtime checking. If we did -that for every lock taken and for every irqs-enable event, it would -render the system practically unusably slow. The complexity of checking -is O(N^2), so even with just a few hundred lock-classes we'd have to do -tens of thousands of checks for every event. - -This problem is solved by checking any given 'locking scenario' (unique -sequence of locks taken after each other) only once. A simple stack of -held locks is maintained, and a lightweight 64-bit hash value is -calculated, which hash is unique for every lock chain. The hash value, -when the chain is validated for the first time, is then put into a hash -table, which hash-table can be checked in a lockfree manner. If the -locking chain occurs again later on, the hash table tells us that we -dont have to validate the chain again. - -Troubleshooting: ----------------- - -The validator tracks a maximum of MAX_LOCKDEP_KEYS number of lock classes. -Exceeding this number will trigger the following lockdep warning: - - (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) - -By default, MAX_LOCKDEP_KEYS is currently set to 8191, and typical -desktop systems have less than 1,000 lock classes, so this warning -normally results from lock-class leakage or failure to properly -initialize locks. These two problems are illustrated below: - -1. Repeated module loading and unloading while running the validator - will result in lock-class leakage. The issue here is that each - load of the module will create a new set of lock classes for - that module's locks, but module unloading does not remove old - classes (see below discussion of reuse of lock classes for why). - Therefore, if that module is loaded and unloaded repeatedly, - the number of lock classes will eventually reach the maximum. - -2. Using structures such as arrays that have large numbers of - locks that are not explicitly initialized. For example, - a hash table with 8192 buckets where each bucket has its own - spinlock_t will consume 8192 lock classes -unless- each spinlock - is explicitly initialized at runtime, for example, using the - run-time spin_lock_init() as opposed to compile-time initializers - such as __SPIN_LOCK_UNLOCKED(). Failure to properly initialize - the per-bucket spinlocks would guarantee lock-class overflow. - In contrast, a loop that called spin_lock_init() on each lock - would place all 8192 locks into a single lock class. - - The moral of this story is that you should always explicitly - initialize your locks. 
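
As a minimal sketch of the hash-table case above (hypothetical code,
assuming a made-up demo_bucket structure), initializing every bucket
lock in a loop places all 8192 locks into the single lock-class of the
spin_lock_init() call site instead of overflowing MAX_LOCKDEP_KEYS:

#include <linux/spinlock.h>

#define DEMO_NR_BUCKETS 8192

struct demo_bucket {
        spinlock_t lock;
        /* ... per-bucket data ... */
};

static struct demo_bucket demo_table[DEMO_NR_BUCKETS];

static void demo_table_init(void)
{
        int i;

        for (i = 0; i < DEMO_NR_BUCKETS; i++)
                spin_lock_init(&demo_table[i].lock);    /* one class for all */
}
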
- -One might argue that the validator should be modified to allow -lock classes to be reused. However, if you are tempted to make this -argument, first review the code and think through the changes that would -be required, keeping in mind that the lock classes to be removed are -likely to be linked into the lock-dependency graph. This turns out to -be harder to do than to say. - -Of course, if you do run out of lock classes, the next thing to do is -to find the offending lock classes. First, the following command gives -you the number of lock classes currently in use along with the maximum: - - grep "lock-classes" /proc/lockdep_stats - -This command produces the following output on a modest system: - - lock-classes: 748 [max: 8191] - -If the number allocated (748 above) increases continually over time, -then there is likely a leak. The following command can be used to -identify the leaking lock classes: - - grep "BD" /proc/lockdep - -Run the command and save the output, then compare against the output from -a later run of this command to identify the leakers. This same output -can also help you find situations where runtime lock initialization has -been omitted. diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt new file mode 100644 index 000000000000..5dbc99c04f6e --- /dev/null +++ b/Documentation/locking/lockdep-design.txt @@ -0,0 +1,286 @@ +Runtime locking correctness validator +===================================== + +started by Ingo Molnar +additions by Arjan van de Ven + +Lock-class +---------- + +The basic object the validator operates upon is a 'class' of locks. + +A class of locks is a group of locks that are logically the same with +respect to locking rules, even if the locks may have multiple (possibly +tens of thousands of) instantiations. For example a lock in the inode +struct is one class, while each inode has its own instantiation of that +lock class. + +The validator tracks the 'state' of lock-classes, and it tracks +dependencies between different lock-classes. The validator maintains a +rolling proof that the state and the dependencies are correct. + +Unlike an lock instantiation, the lock-class itself never goes away: when +a lock-class is used for the first time after bootup it gets registered, +and all subsequent uses of that lock-class will be attached to this +lock-class. + +State +----- + +The validator tracks lock-class usage history into 4n + 1 separate state bits: + +- 'ever held in STATE context' +- 'ever held as readlock in STATE context' +- 'ever held with STATE enabled' +- 'ever held as readlock with STATE enabled' + +Where STATE can be either one of (kernel/lockdep_states.h) + - hardirq + - softirq + - reclaim_fs + +- 'ever used' [ == !unused ] + +When locking rules are violated, these state bits are presented in the +locking error messages, inside curlies. A contrived example: + + modprobe/2287 is trying to acquire lock: + (&sio_locks[i].lock){-.-...}, at: [] mutex_lock+0x21/0x24 + + but task is already holding lock: + (&sio_locks[i].lock){-.-...}, at: [] mutex_lock+0x21/0x24 + + +The bit position indicates STATE, STATE-read, for each of the states listed +above, and the character displayed in each indicates: + + '.' acquired while irqs disabled and not in irq context + '-' acquired in irq context + '+' acquired with irqs enabled + '?' acquired in irq context with irqs enabled. + +Unused mutexes cannot be part of the cause of an error. 
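
As an illustrative aside (hypothetical code, using an API this document
does not itself discuss): every lock initialized at a given
spin_lock_init()/DEFINE_SPINLOCK() site shares one lock-class, keyed by
a static key generated at that site, and lockdep_set_class() can give a
particular instance a class of its own:

/* Sketch only: demo_init() and demo_special_key are invented names. */
#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key demo_special_key;

static void demo_init(spinlock_t *plain, spinlock_t *special)
{
        spin_lock_init(plain);                          /* per-site class */
        spin_lock_init(special);
        lockdep_set_class(special, &demo_special_key);  /* its own class  */
}
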
+ + +Single-lock state rules: +------------------------ + +A softirq-unsafe lock-class is automatically hardirq-unsafe as well. The +following states are exclusive, and only one of them is allowed to be +set for any lock-class: + + and + and + +The validator detects and reports lock usage that violate these +single-lock state rules. + +Multi-lock dependency rules: +---------------------------- + +The same lock-class must not be acquired twice, because this could lead +to lock recursion deadlocks. + +Furthermore, two locks may not be taken in different order: + + -> + -> + +because this could lead to lock inversion deadlocks. (The validator +finds such dependencies in arbitrary complexity, i.e. there can be any +other locking sequence between the acquire-lock operations, the +validator will still track all dependencies between locks.) + +Furthermore, the following usage based lock dependencies are not allowed +between any two lock-classes: + + -> + -> + +The first rule comes from the fact the a hardirq-safe lock could be +taken by a hardirq context, interrupting a hardirq-unsafe lock - and +thus could result in a lock inversion deadlock. Likewise, a softirq-safe +lock could be taken by an softirq context, interrupting a softirq-unsafe +lock. + +The above rules are enforced for any locking sequence that occurs in the +kernel: when acquiring a new lock, the validator checks whether there is +any rule violation between the new lock and any of the held locks. + +When a lock-class changes its state, the following aspects of the above +dependency rules are enforced: + +- if a new hardirq-safe lock is discovered, we check whether it + took any hardirq-unsafe lock in the past. + +- if a new softirq-safe lock is discovered, we check whether it took + any softirq-unsafe lock in the past. + +- if a new hardirq-unsafe lock is discovered, we check whether any + hardirq-safe lock took it in the past. + +- if a new softirq-unsafe lock is discovered, we check whether any + softirq-safe lock took it in the past. + +(Again, we do these checks too on the basis that an interrupt context +could interrupt _any_ of the irq-unsafe or hardirq-unsafe locks, which +could lead to a lock inversion deadlock - even if that lock scenario did +not trigger in practice yet.) + +Exception: Nested data dependencies leading to nested locking +------------------------------------------------------------- + +There are a few cases where the Linux kernel acquires more than one +instance of the same lock-class. Such cases typically happen when there +is some sort of hierarchy within objects of the same type. In these +cases there is an inherent "natural" ordering between the two objects +(defined by the properties of the hierarchy), and the kernel grabs the +locks in this fixed order on each of the objects. + +An example of such an object hierarchy that results in "nested locking" +is that of a "whole disk" block-dev object and a "partition" block-dev +object; the partition is "part of" the whole device and as long as one +always takes the whole disk lock as a higher lock than the partition +lock, the lock ordering is fully correct. The validator does not +automatically detect this natural ordering, as the locking rule behind +the ordering is not static. + +In order to teach the validator about this correct usage model, new +versions of the various locking primitives were added that allow you to +specify a "nesting level". 
An example call, for the block device mutex, +looks like this: + +enum bdev_bd_mutex_lock_class +{ + BD_MUTEX_NORMAL, + BD_MUTEX_WHOLE, + BD_MUTEX_PARTITION +}; + + mutex_lock_nested(&bdev->bd_contains->bd_mutex, BD_MUTEX_PARTITION); + +In this case the locking is done on a bdev object that is known to be a +partition. + +The validator treats a lock that is taken in such a nested fashion as a +separate (sub)class for the purposes of validation. + +Note: When changing code to use the _nested() primitives, be careful and +check really thoroughly that the hierarchy is correctly mapped; otherwise +you can get false positives or false negatives. + +Proof of 100% correctness: +-------------------------- + +The validator achieves perfect, mathematical 'closure' (proof of locking +correctness) in the sense that for every simple, standalone single-task +locking sequence that occurred at least once during the lifetime of the +kernel, the validator proves it with a 100% certainty that no +combination and timing of these locking sequences can cause any class of +lock related deadlock. [*] + +I.e. complex multi-CPU and multi-task locking scenarios do not have to +occur in practice to prove a deadlock: only the simple 'component' +locking chains have to occur at least once (anytime, in any +task/context) for the validator to be able to prove correctness. (For +example, complex deadlocks that would normally need more than 3 CPUs and +a very unlikely constellation of tasks, irq-contexts and timings to +occur, can be detected on a plain, lightly loaded single-CPU system as +well!) + +This radically decreases the complexity of locking related QA of the +kernel: what has to be done during QA is to trigger as many "simple" +single-task locking dependencies in the kernel as possible, at least +once, to prove locking correctness - instead of having to trigger every +possible combination of locking interaction between CPUs, combined with +every possible hardirq and softirq nesting scenario (which is impossible +to do in practice). + +[*] assuming that the validator itself is 100% correct, and no other + part of the system corrupts the state of the validator in any way. + We also assume that all NMI/SMM paths [which could interrupt + even hardirq-disabled codepaths] are correct and do not interfere + with the validator. We also assume that the 64-bit 'chain hash' + value is unique for every lock-chain in the system. Also, lock + recursion must not be higher than 20. + +Performance: +------------ + +The above rules require _massive_ amounts of runtime checking. If we did +that for every lock taken and for every irqs-enable event, it would +render the system practically unusably slow. The complexity of checking +is O(N^2), so even with just a few hundred lock-classes we'd have to do +tens of thousands of checks for every event. + +This problem is solved by checking any given 'locking scenario' (unique +sequence of locks taken after each other) only once. A simple stack of +held locks is maintained, and a lightweight 64-bit hash value is +calculated, which hash is unique for every lock chain. The hash value, +when the chain is validated for the first time, is then put into a hash +table, which hash-table can be checked in a lockfree manner. If the +locking chain occurs again later on, the hash table tells us that we +dont have to validate the chain again. + +Troubleshooting: +---------------- + +The validator tracks a maximum of MAX_LOCKDEP_KEYS number of lock classes. 
+Exceeding this number will trigger the following lockdep warning: + + (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) + +By default, MAX_LOCKDEP_KEYS is currently set to 8191, and typical +desktop systems have less than 1,000 lock classes, so this warning +normally results from lock-class leakage or failure to properly +initialize locks. These two problems are illustrated below: + +1. Repeated module loading and unloading while running the validator + will result in lock-class leakage. The issue here is that each + load of the module will create a new set of lock classes for + that module's locks, but module unloading does not remove old + classes (see below discussion of reuse of lock classes for why). + Therefore, if that module is loaded and unloaded repeatedly, + the number of lock classes will eventually reach the maximum. + +2. Using structures such as arrays that have large numbers of + locks that are not explicitly initialized. For example, + a hash table with 8192 buckets where each bucket has its own + spinlock_t will consume 8192 lock classes -unless- each spinlock + is explicitly initialized at runtime, for example, using the + run-time spin_lock_init() as opposed to compile-time initializers + such as __SPIN_LOCK_UNLOCKED(). Failure to properly initialize + the per-bucket spinlocks would guarantee lock-class overflow. + In contrast, a loop that called spin_lock_init() on each lock + would place all 8192 locks into a single lock class. + + The moral of this story is that you should always explicitly + initialize your locks. + +One might argue that the validator should be modified to allow +lock classes to be reused. However, if you are tempted to make this +argument, first review the code and think through the changes that would +be required, keeping in mind that the lock classes to be removed are +likely to be linked into the lock-dependency graph. This turns out to +be harder to do than to say. + +Of course, if you do run out of lock classes, the next thing to do is +to find the offending lock classes. First, the following command gives +you the number of lock classes currently in use along with the maximum: + + grep "lock-classes" /proc/lockdep_stats + +This command produces the following output on a modest system: + + lock-classes: 748 [max: 8191] + +If the number allocated (748 above) increases continually over time, +then there is likely a leak. The following command can be used to +identify the leaking lock classes: + + grep "BD" /proc/lockdep + +Run the command and save the output, then compare against the output from +a later run of this command to identify the leakers. This same output +can also help you find situations where runtime lock initialization has +been omitted. diff --git a/Documentation/locking/lockstat.txt b/Documentation/locking/lockstat.txt new file mode 100644 index 000000000000..7428773a1e69 --- /dev/null +++ b/Documentation/locking/lockstat.txt @@ -0,0 +1,178 @@ + +LOCK STATISTICS + +- WHAT + +As the name suggests, it provides statistics on locks. + +- WHY + +Because things like lock contention can severely impact performance. + +- HOW + +Lockdep already has hooks in the lock functions and maps lock instances to +lock classes. We build on that (see Documentation/lokcing/lockdep-design.txt). +The graph below shows the relation between the lock functions and the various +hooks therein. + + __acquire + | + lock _____ + | \ + | __contended + | | + | + | _______/ + |/ + | + __acquired + | + . + + . 
+ | + __release + | + unlock + +lock, unlock - the regular lock functions +__* - the hooks +<> - states + +With these hooks we provide the following statistics: + + con-bounces - number of lock contention that involved x-cpu data + contentions - number of lock acquisitions that had to wait + wait time min - shortest (non-0) time we ever had to wait for a lock + max - longest time we ever had to wait for a lock + total - total time we spend waiting on this lock + avg - average time spent waiting on this lock + acq-bounces - number of lock acquisitions that involved x-cpu data + acquisitions - number of times we took the lock + hold time min - shortest (non-0) time we ever held the lock + max - longest time we ever held the lock + total - total time this lock was held + avg - average time this lock was held + +These numbers are gathered per lock class, per read/write state (when +applicable). + +It also tracks 4 contention points per class. A contention point is a call site +that had to wait on lock acquisition. + + - CONFIGURATION + +Lock statistics are enabled via CONFIG_LOCK_STAT. + + - USAGE + +Enable collection of statistics: + +# echo 1 >/proc/sys/kernel/lock_stat + +Disable collection of statistics: + +# echo 0 >/proc/sys/kernel/lock_stat + +Look at the current lock statistics: + +( line numbers not part of actual output, done for clarity in the explanation + below ) + +# less /proc/lock_stat + +01 lock_stat version 0.4 +02----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +03 class name con-bounces contentions waittime-min waittime-max waittime-total waittime-avg acq-bounces acquisitions holdtime-min holdtime-max holdtime-total holdtime-avg +04----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +05 +06 &mm->mmap_sem-W: 46 84 0.26 939.10 16371.53 194.90 47291 2922365 0.16 2220301.69 17464026916.32 5975.99 +07 &mm->mmap_sem-R: 37 100 1.31 299502.61 325629.52 3256.30 212344 34316685 0.10 7744.91 95016910.20 2.77 +08 --------------- +09 &mm->mmap_sem 1 [] khugepaged_scan_mm_slot+0x57/0x280 +19 &mm->mmap_sem 96 [] __do_page_fault+0x1d4/0x510 +11 &mm->mmap_sem 34 [] vm_mmap_pgoff+0x87/0xd0 +12 &mm->mmap_sem 17 [] vm_munmap+0x41/0x80 +13 --------------- +14 &mm->mmap_sem 1 [] dup_mmap+0x2a/0x3f0 +15 &mm->mmap_sem 60 [] SyS_mprotect+0xe9/0x250 +16 &mm->mmap_sem 41 [] __do_page_fault+0x1d4/0x510 +17 &mm->mmap_sem 68 [] vm_mmap_pgoff+0x87/0xd0 +18 +19............................................................................................................................................................................................................................. 
+20 +21 unix_table_lock: 110 112 0.21 49.24 163.91 1.46 21094 66312 0.12 624.42 31589.81 0.48 +22 --------------- +23 unix_table_lock 45 [] unix_create1+0x16e/0x1b0 +24 unix_table_lock 47 [] unix_release_sock+0x31/0x250 +25 unix_table_lock 15 [] unix_find_other+0x117/0x230 +26 unix_table_lock 5 [] unix_autobind+0x11f/0x1b0 +27 --------------- +28 unix_table_lock 39 [] unix_release_sock+0x31/0x250 +29 unix_table_lock 49 [] unix_create1+0x16e/0x1b0 +30 unix_table_lock 20 [] unix_find_other+0x117/0x230 +31 unix_table_lock 4 [] unix_autobind+0x11f/0x1b0 + + +This excerpt shows the first two lock class statistics. Line 01 shows the +output version - each time the format changes this will be updated. Line 02-04 +show the header with column descriptions. Lines 05-18 and 20-31 show the actual +statistics. These statistics come in two parts; the actual stats separated by a +short separator (line 08, 13) from the contention points. + +The first lock (05-18) is a read/write lock, and shows two lines above the +short separator. The contention points don't match the column descriptors, +they have two: contentions and [] symbol. The second set of contention +points are the points we're contending with. + +The integer part of the time values is in us. + +Dealing with nested locks, subclasses may appear: + +32........................................................................................................................................................................................................................... +33 +34 &rq->lock: 13128 13128 0.43 190.53 103881.26 7.91 97454 3453404 0.00 401.11 13224683.11 3.82 +35 --------- +36 &rq->lock 645 [] task_rq_lock+0x43/0x75 +37 &rq->lock 297 [] try_to_wake_up+0x127/0x25a +38 &rq->lock 360 [] select_task_rq_fair+0x1f0/0x74a +39 &rq->lock 428 [] scheduler_tick+0x46/0x1fb +40 --------- +41 &rq->lock 77 [] task_rq_lock+0x43/0x75 +42 &rq->lock 174 [] try_to_wake_up+0x127/0x25a +43 &rq->lock 4715 [] double_rq_lock+0x42/0x54 +44 &rq->lock 893 [] schedule+0x157/0x7b8 +45 +46........................................................................................................................................................................................................................... +47 +48 &rq->lock/1: 1526 11488 0.33 388.73 136294.31 11.86 21461 38404 0.00 37.93 109388.53 2.84 +49 ----------- +50 &rq->lock/1 11526 [] double_rq_lock+0x4f/0x54 +51 ----------- +52 &rq->lock/1 5645 [] double_rq_lock+0x42/0x54 +53 &rq->lock/1 1224 [] schedule+0x157/0x7b8 +54 &rq->lock/1 4336 [] double_rq_lock+0x4f/0x54 +55 &rq->lock/1 181 [] try_to_wake_up+0x127/0x25a + +Line 48 shows statistics for the second subclass (/1) of &rq->lock class +(subclass starts from 0), since in this case, as line 50 suggests, +double_rq_lock actually acquires a nested lock of two spinlocks. 
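
The "/1" subclass in the listing above comes from an explicit nesting
annotation. As a rough, hypothetical sketch (this is not the actual
double_rq_lock() implementation), acquiring two locks of the same class
back to back looks like:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static void demo_double_lock(spinlock_t *a, spinlock_t *b)
{
        spin_lock(a);                                   /* base class           */
        spin_lock_nested(b, SINGLE_DEPTH_NESTING);      /* shown as "<class>/1" */

        /* ... work on both objects ... */

        spin_unlock(b);
        spin_unlock(a);
}
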
+ +View the top contending locks: + +# grep : /proc/lock_stat | head + clockevents_lock: 2926159 2947636 0.15 46882.81 1784540466.34 605.41 3381345 3879161 0.00 2260.97 53178395.68 13.71 + tick_broadcast_lock: 346460 346717 0.18 2257.43 39364622.71 113.54 3642919 4242696 0.00 2263.79 49173646.60 11.59 + &mapping->i_mmap_mutex: 203896 203899 3.36 645530.05 31767507988.39 155800.21 3361776 8893984 0.17 2254.15 14110121.02 1.59 + &rq->lock: 135014 136909 0.18 606.09 842160.68 6.15 1540728 10436146 0.00 728.72 17606683.41 1.69 + &(&zone->lru_lock)->rlock: 93000 94934 0.16 59.18 188253.78 1.98 1199912 3809894 0.15 391.40 3559518.81 0.93 + tasklist_lock-W: 40667 41130 0.23 1189.42 428980.51 10.43 270278 510106 0.16 653.51 3939674.91 7.72 + tasklist_lock-R: 21298 21305 0.20 1310.05 215511.12 10.12 186204 241258 0.14 1162.33 1179779.23 4.89 + rcu_node_1: 47656 49022 0.16 635.41 193616.41 3.95 844888 1865423 0.00 764.26 1656226.96 0.89 + &(&dentry->d_lockref.lock)->rlock: 39791 40179 0.15 1302.08 88851.96 2.21 2790851 12527025 0.10 1910.75 3379714.27 0.27 + rcu_node_0: 29203 30064 0.16 786.55 1555573.00 51.74 88963 244254 0.00 398.87 428872.51 1.76 + +Clear the statistics: + +# echo 0 > /proc/lock_stat diff --git a/Documentation/locking/mutex-design.txt b/Documentation/locking/mutex-design.txt new file mode 100644 index 000000000000..ee231ed09ec6 --- /dev/null +++ b/Documentation/locking/mutex-design.txt @@ -0,0 +1,157 @@ +Generic Mutex Subsystem + +started by Ingo Molnar +updated by Davidlohr Bueso + +What are mutexes? +----------------- + +In the Linux kernel, mutexes refer to a particular locking primitive +that enforces serialization on shared memory systems, and not only to +the generic term referring to 'mutual exclusion' found in academia +or similar theoretical text books. Mutexes are sleeping locks which +behave similarly to binary semaphores, and were introduced in 2006[1] +as an alternative to these. This new data structure provided a number +of advantages, including simpler interfaces, and at that time smaller +code (see Disadvantages). + +[1] http://lwn.net/Articles/164802/ + +Implementation +-------------- + +Mutexes are represented by 'struct mutex', defined in include/linux/mutex.h +and implemented in kernel/locking/mutex.c. These locks use a three +state atomic counter (->count) to represent the different possible +transitions that can occur during the lifetime of a lock: + + 1: unlocked + 0: locked, no waiters + negative: locked, with potential waiters + +In its most basic form it also includes a wait-queue and a spinlock +that serializes access to it. CONFIG_SMP systems can also include +a pointer to the lock task owner (->owner) as well as a spinner MCS +lock (->osq), both described below in (ii). + +When acquiring a mutex, there are three possible paths that can be +taken, depending on the state of the lock: + +(i) fastpath: tries to atomically acquire the lock by decrementing the + counter. If it was already taken by another task it goes to the next + possible path. This logic is architecture specific. On x86-64, the + locking fastpath is 2 instructions: + + 0000000000000e10 : + e21: f0 ff 0b lock decl (%rbx) + e24: 79 08 jns e2e + + the unlocking fastpath is equally tight: + + 0000000000000bc0 : + bc8: f0 ff 07 lock incl (%rdi) + bcb: 7f 0a jg bd7 + + +(ii) midpath: aka optimistic spinning, tries to spin for acquisition + while the lock owner is running and there are no other tasks ready + to run that have higher priority (need_resched). 
The rationale is + that if the lock owner is running, it is likely to release the lock + soon. The mutex spinners are queued up using MCS lock so that only + one spinner can compete for the mutex. + + The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spinlock + with the desirable properties of being fair and with each cpu trying + to acquire the lock spinning on a local variable. It avoids expensive + cacheline bouncing that common test-and-set spinlock implementations + incur. An MCS-like lock is specially tailored for optimistic spinning + for sleeping lock implementation. An important feature of the customized + MCS lock is that it has the extra property that spinners are able to exit + the MCS spinlock queue when they need to reschedule. This further helps + avoid situations where MCS spinners that need to reschedule would continue + waiting to spin on mutex owner, only to go directly to slowpath upon + obtaining the MCS lock. + + +(iii) slowpath: last resort, if the lock is still unable to be acquired, + the task is added to the wait-queue and sleeps until woken up by the + unlock path. Under normal circumstances it blocks as TASK_UNINTERRUPTIBLE. + +While formally kernel mutexes are sleepable locks, it is path (ii) that +makes them more practically a hybrid type. By simply not interrupting a +task and busy-waiting for a few cycles instead of immediately sleeping, +the performance of this lock has been seen to significantly improve a +number of workloads. Note that this technique is also used for rw-semaphores. + +Semantics +--------- + +The mutex subsystem checks and enforces the following rules: + + - Only one task can hold the mutex at a time. + - Only the owner can unlock the mutex. + - Multiple unlocks are not permitted. + - Recursive locking/unlocking is not permitted. + - A mutex must only be initialized via the API (see below). + - A task may not exit with a mutex held. + - Memory areas where held locks reside must not be freed. + - Held mutexes must not be reinitialized. + - Mutexes may not be used in hardware or software interrupt + contexts such as tasklets and timers. + +These semantics are fully enforced when CONFIG DEBUG_MUTEXES is enabled. +In addition, the mutex debugging code also implements a number of other +features that make lock debugging easier and faster: + + - Uses symbolic names of mutexes, whenever they are printed + in debug output. + - Point-of-acquire tracking, symbolic lookup of function names, + list of all locks held in the system, printout of them. + - Owner tracking. + - Detects self-recursing locks and prints out all relevant info. + - Detects multi-task circular deadlocks and prints out all affected + locks and tasks (and only those tasks). 
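
A minimal usage sketch (hypothetical code, not part of the document)
that honors these rules, using the interfaces listed in the next
section: the mutex is initialized through the API, locked and unlocked
by the same task, never acquired recursively, and never touched from
interrupt context.

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);
static int demo_shared_count;

static int demo_increment(void)
{
        int ret;

        ret = mutex_lock_interruptible(&demo_mutex);
        if (ret)
                return ret;             /* interrupted by a signal, lock not held */

        demo_shared_count++;            /* critical section */

        mutex_unlock(&demo_mutex);      /* only the owner may unlock */
        return 0;
}
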
+ + +Interfaces +---------- +Statically define the mutex: + DEFINE_MUTEX(name); + +Dynamically initialize the mutex: + mutex_init(mutex); + +Acquire the mutex, uninterruptible: + void mutex_lock(struct mutex *lock); + void mutex_lock_nested(struct mutex *lock, unsigned int subclass); + int mutex_trylock(struct mutex *lock); + +Acquire the mutex, interruptible: + int mutex_lock_interruptible_nested(struct mutex *lock, + unsigned int subclass); + int mutex_lock_interruptible(struct mutex *lock); + +Acquire the mutex, interruptible, if dec to 0: + int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + +Unlock the mutex: + void mutex_unlock(struct mutex *lock); + +Test if the mutex is taken: + int mutex_is_locked(struct mutex *lock); + +Disadvantages +------------- + +Unlike its original design and purpose, 'struct mutex' is larger than +most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice +as large as 'struct semaphore' (24 bytes) and 8 bytes shy of the +'struct rw_semaphore' variant. Larger structure sizes mean more CPU +cache and memory footprint. + +When to use mutexes +------------------- + +Unless the strict semantics of mutexes are unsuitable and/or the critical +region prevents the lock from being shared, always prefer them to any other +locking primitive. diff --git a/Documentation/locking/rt-mutex-design.txt b/Documentation/locking/rt-mutex-design.txt new file mode 100644 index 000000000000..8666070d3189 --- /dev/null +++ b/Documentation/locking/rt-mutex-design.txt @@ -0,0 +1,781 @@ +# +# Copyright (c) 2006 Steven Rostedt +# Licensed under the GNU Free Documentation License, Version 1.2 +# + +RT-mutex implementation design +------------------------------ + +This document tries to describe the design of the rtmutex.c implementation. +It doesn't describe the reasons why rtmutex.c exists. For that please see +Documentation/rt-mutex.txt. Although this document does explain problems +that happen without this code, but that is in the concept to understand +what the code actually is doing. + +The goal of this document is to help others understand the priority +inheritance (PI) algorithm that is used, as well as reasons for the +decisions that were made to implement PI in the manner that was done. + + +Unbounded Priority Inversion +---------------------------- + +Priority inversion is when a lower priority process executes while a higher +priority process wants to run. This happens for several reasons, and +most of the time it can't be helped. Anytime a high priority process wants +to use a resource that a lower priority process has (a mutex for example), +the high priority process must wait until the lower priority process is done +with the resource. This is a priority inversion. What we want to prevent +is something called unbounded priority inversion. That is when the high +priority process is prevented from running by a lower priority process for +an undetermined amount of time. + +The classic example of unbounded priority inversion is where you have three +processes, let's call them processes A, B, and C, where A is the highest +priority process, C is the lowest, and B is in between. A tries to grab a lock +that C owns and must wait and lets C run to release the lock. But in the +meantime, B executes, and since B is of a higher priority than C, it preempts C, +but by doing so, it is in fact preempting A which is a higher priority process. 
+Now there's no way of knowing how long A will be sleeping waiting for C +to release the lock, because for all we know, B is a CPU hog and will +never give C a chance to release the lock. This is called unbounded priority +inversion. + +Here's a little ASCII art to show the problem. + + grab lock L1 (owned by C) + | +A ---+ + C preempted by B + | +C +----+ + +B +--------> + B now keeps A from running. + + +Priority Inheritance (PI) +------------------------- + +There are several ways to solve this issue, but other ways are out of scope +for this document. Here we only discuss PI. + +PI is where a process inherits the priority of another process if the other +process blocks on a lock owned by the current process. To make this easier +to understand, let's use the previous example, with processes A, B, and C again. + +This time, when A blocks on the lock owned by C, C would inherit the priority +of A. So now if B becomes runnable, it would not preempt C, since C now has +the high priority of A. As soon as C releases the lock, it loses its +inherited priority, and A then can continue with the resource that C had. + +Terminology +----------- + +Here I explain some terminology that is used in this document to help describe +the design that is used to implement PI. + +PI chain - The PI chain is an ordered series of locks and processes that cause + processes to inherit priorities from a previous process that is + blocked on one of its locks. This is described in more detail + later in this document. + +mutex - In this document, to differentiate from locks that implement + PI and spin locks that are used in the PI code, from now on + the PI locks will be called a mutex. + +lock - In this document from now on, I will use the term lock when + referring to spin locks that are used to protect parts of the PI + algorithm. These locks disable preemption for UP (when + CONFIG_PREEMPT is enabled) and on SMP prevents multiple CPUs from + entering critical sections simultaneously. + +spin lock - Same as lock above. + +waiter - A waiter is a struct that is stored on the stack of a blocked + process. Since the scope of the waiter is within the code for + a process being blocked on the mutex, it is fine to allocate + the waiter on the process's stack (local variable). This + structure holds a pointer to the task, as well as the mutex that + the task is blocked on. It also has the plist node structures to + place the task in the waiter_list of a mutex as well as the + pi_list of a mutex owner task (described below). + + waiter is sometimes used in reference to the task that is waiting + on a mutex. This is the same as waiter->task. + +waiters - A list of processes that are blocked on a mutex. + +top waiter - The highest priority process waiting on a specific mutex. + +top pi waiter - The highest priority process waiting on one of the mutexes + that a specific process owns. + +Note: task and process are used interchangeably in this document, mostly to + differentiate between two processes that are being described together. + + +PI chain +-------- + +The PI chain is a list of processes and mutexes that may cause priority +inheritance to take place. Multiple chains may converge, but a chain +would never diverge, since a process can't be blocked on more than one +mutex at a time. 
+ +Example: + + Process: A, B, C, D, E + Mutexes: L1, L2, L3, L4 + + A owns: L1 + B blocked on L1 + B owns L2 + C blocked on L2 + C owns L3 + D blocked on L3 + D owns L4 + E blocked on L4 + +The chain would be: + + E->L4->D->L3->C->L2->B->L1->A + +To show where two chains merge, we could add another process F and +another mutex L5 where B owns L5 and F is blocked on mutex L5. + +The chain for F would be: + + F->L5->B->L1->A + +Since a process may own more than one mutex, but never be blocked on more than +one, the chains merge. + +Here we show both chains: + + E->L4->D->L3->C->L2-+ + | + +->B->L1->A + | + F->L5-+ + +For PI to work, the processes at the right end of these chains (or we may +also call it the Top of the chain) must be equal to or higher in priority +than the processes to the left or below in the chain. + +Also since a mutex may have more than one process blocked on it, we can +have multiple chains merge at mutexes. If we add another process G that is +blocked on mutex L2: + + G->L2->B->L1->A + +And once again, to show how this can grow I will show the merging chains +again. + + E->L4->D->L3->C-+ + +->L2-+ + | | + G-+ +->B->L1->A + | + F->L5-+ + + +Plist +----- + +Before I go further and talk about how the PI chain is stored through lists +on both mutexes and processes, I'll explain the plist. This is similar to +the struct list_head functionality that is already in the kernel. +The implementation of plist is out of scope for this document, but it is +very important to understand what it does. + +There are a few differences between plist and list, the most important one +being that plist is a priority sorted linked list. This means that the +priorities of the plist are sorted, such that it takes O(1) to retrieve the +highest priority item in the list. Obviously this is useful to store processes +based on their priorities. + +Another difference, which is important for implementation, is that, unlike +list, the head of the list is a different element than the nodes of a list. +So the head of the list is declared as struct plist_head and nodes that will +be added to the list are declared as struct plist_node. + + +Mutex Waiter List +----------------- + +Every mutex keeps track of all the waiters that are blocked on itself. The mutex +has a plist to store these waiters by priority. This list is protected by +a spin lock that is located in the struct of the mutex. This lock is called +wait_lock. Since the modification of the waiter list is never done in +interrupt context, the wait_lock can be taken without disabling interrupts. + + +Task PI List +------------ + +To keep track of the PI chains, each process has its own PI list. This is +a list of all top waiters of the mutexes that are owned by the process. +Note that this list only holds the top waiters and not all waiters that are +blocked on mutexes owned by the process. + +The top of the task's PI list is always the highest priority task that +is waiting on a mutex that is owned by the task. So if the task has +inherited a priority, it will always be the priority of the task that is +at the top of this list. + +This list is stored in the task structure of a process as a plist called +pi_list. This list is protected by a spin lock also in the task structure, +called pi_lock. This lock may also be taken in interrupt context, so when +locking the pi_lock, interrupts must be disabled. + + +Depth of the PI Chain +--------------------- + +The maximum depth of the PI chain is not dynamic, and could actually be +defined. 
But is very complex to figure it out, since it depends on all +the nesting of mutexes. Let's look at the example where we have 3 mutexes, +L1, L2, and L3, and four separate functions func1, func2, func3 and func4. +The following shows a locking order of L1->L2->L3, but may not actually +be directly nested that way. + +void func1(void) +{ + mutex_lock(L1); + + /* do anything */ + + mutex_unlock(L1); +} + +void func2(void) +{ + mutex_lock(L1); + mutex_lock(L2); + + /* do something */ + + mutex_unlock(L2); + mutex_unlock(L1); +} + +void func3(void) +{ + mutex_lock(L2); + mutex_lock(L3); + + /* do something else */ + + mutex_unlock(L3); + mutex_unlock(L2); +} + +void func4(void) +{ + mutex_lock(L3); + + /* do something again */ + + mutex_unlock(L3); +} + +Now we add 4 processes that run each of these functions separately. +Processes A, B, C, and D which run functions func1, func2, func3 and func4 +respectively, and such that D runs first and A last. With D being preempted +in func4 in the "do something again" area, we have a locking that follows: + +D owns L3 + C blocked on L3 + C owns L2 + B blocked on L2 + B owns L1 + A blocked on L1 + +And thus we have the chain A->L1->B->L2->C->L3->D. + +This gives us a PI depth of 4 (four processes), but looking at any of the +functions individually, it seems as though they only have at most a locking +depth of two. So, although the locking depth is defined at compile time, +it still is very difficult to find the possibilities of that depth. + +Now since mutexes can be defined by user-land applications, we don't want a DOS +type of application that nests large amounts of mutexes to create a large +PI chain, and have the code holding spin locks while looking at a large +amount of data. So to prevent this, the implementation not only implements +a maximum lock depth, but also only holds at most two different locks at a +time, as it walks the PI chain. More about this below. + + +Mutex owner and flags +--------------------- + +The mutex structure contains a pointer to the owner of the mutex. If the +mutex is not owned, this owner is set to NULL. Since all architectures +have the task structure on at least a four byte alignment (and if this is +not true, the rtmutex.c code will be broken!), this allows for the two +least significant bits to be used as flags. This part is also described +in Documentation/rt-mutex.txt, but will also be briefly described here. + +Bit 0 is used as the "Pending Owner" flag. This is described later. +Bit 1 is used as the "Has Waiters" flags. This is also described later + in more detail, but is set whenever there are waiters on a mutex. + + +cmpxchg Tricks +-------------- + +Some architectures implement an atomic cmpxchg (Compare and Exchange). This +is used (when applicable) to keep the fast path of grabbing and releasing +mutexes short. + +cmpxchg is basically the following function performed atomically: + +unsigned long _cmpxchg(unsigned long *A, unsigned long *B, unsigned long *C) +{ + unsigned long T = *A; + if (*A == *B) { + *A = *C; + } + return T; +} +#define cmpxchg(a,b,c) _cmpxchg(&a,&b,&c) + +This is really nice to have, since it allows you to only update a variable +if the variable is what you expect it to be. You know if it succeeded if +the return value (the old value of A) is equal to B. + +The macro rt_mutex_cmpxchg is used to try to lock and unlock mutexes. If +the architecture does not support CMPXCHG, then this macro is simply set +to fail every time. 
But if CMPXCHG is supported, then this will +help out extremely to keep the fast path short. + +The use of rt_mutex_cmpxchg with the flags in the owner field help optimize +the system for architectures that support it. This will also be explained +later in this document. + + +Priority adjustments +-------------------- + +The implementation of the PI code in rtmutex.c has several places that a +process must adjust its priority. With the help of the pi_list of a +process this is rather easy to know what needs to be adjusted. + +The functions implementing the task adjustments are rt_mutex_adjust_prio, +__rt_mutex_adjust_prio (same as the former, but expects the task pi_lock +to already be taken), rt_mutex_getprio, and rt_mutex_setprio. + +rt_mutex_getprio and rt_mutex_setprio are only used in __rt_mutex_adjust_prio. + +rt_mutex_getprio returns the priority that the task should have. Either the +task's own normal priority, or if a process of a higher priority is waiting on +a mutex owned by the task, then that higher priority should be returned. +Since the pi_list of a task holds an order by priority list of all the top +waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs +to compare the top pi waiter to its own normal priority, and return the higher +priority back. + +(Note: if looking at the code, you will notice that the lower number of + prio is returned. This is because the prio field in the task structure + is an inverse order of the actual priority. So a "prio" of 5 is + of higher priority than a "prio" of 10.) + +__rt_mutex_adjust_prio examines the result of rt_mutex_getprio, and if the +result does not equal the task's current priority, then rt_mutex_setprio +is called to adjust the priority of the task to the new priority. +Note that rt_mutex_setprio is defined in kernel/sched/core.c to implement the +actual change in priority. + +It is interesting to note that __rt_mutex_adjust_prio can either increase +or decrease the priority of the task. In the case that a higher priority +process has just blocked on a mutex owned by the task, __rt_mutex_adjust_prio +would increase/boost the task's priority. But if a higher priority task +were for some reason to leave the mutex (timeout or signal), this same function +would decrease/unboost the priority of the task. That is because the pi_list +always contains the highest priority task that is waiting on a mutex owned +by the task, so we only need to compare the priority of that top pi waiter +to the normal priority of the given task. + + +High level overview of the PI chain walk +---------------------------------------- + +The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain. + +The implementation has gone through several iterations, and has ended up +with what we believe is the best. It walks the PI chain by only grabbing +at most two locks at a time, and is very efficient. + +The rt_mutex_adjust_prio_chain can be used either to boost or lower process +priorities. + +rt_mutex_adjust_prio_chain is called with a task to be checked for PI +(de)boosting (the owner of a mutex that a process is blocking on), a flag to +check for deadlocking, the mutex that the task owns, and a pointer to a waiter +that is the process's waiter struct that is blocked on the mutex (although this +parameter may be NULL for deboosting). + +For this explanation, I will not mention deadlock detection. This explanation +will try to stay at a high level. + +When this function is called, there are no locks held. 
That also means +that the state of the owner and lock can change when entered into this function. + +Before this function is called, the task has already had rt_mutex_adjust_prio +performed on it. This means that the task is set to the priority that it +should be at, but the plist nodes of the task's waiter have not been updated +with the new priorities, and that this task may not be in the proper locations +in the pi_lists and wait_lists that the task is blocked on. This function +solves all that. + +A loop is entered, where task is the owner to be checked for PI changes that +was passed by parameter (for the first iteration). The pi_lock of this task is +taken to prevent any more changes to the pi_list of the task. This also +prevents new tasks from completing the blocking on a mutex that is owned by this +task. + +If the task is not blocked on a mutex then the loop is exited. We are at +the top of the PI chain. + +A check is now done to see if the original waiter (the process that is blocked +on the current mutex) is the top pi waiter of the task. That is, is this +waiter on the top of the task's pi_list. If it is not, it either means that +there is another process higher in priority that is blocked on one of the +mutexes that the task owns, or that the waiter has just woken up via a signal +or timeout and has left the PI chain. In either case, the loop is exited, since +we don't need to do any more changes to the priority of the current task, or any +task that owns a mutex that this current task is waiting on. A priority chain +walk is only needed when a new top pi waiter is made to a task. + +The next check sees if the task's waiter plist node has the priority equal to +the priority the task is set at. If they are equal, then we are done with +the loop. Remember that the function started with the priority of the +task adjusted, but the plist nodes that hold the task in other processes +pi_lists have not been adjusted. + +Next, we look at the mutex that the task is blocked on. The mutex's wait_lock +is taken. This is done by a spin_trylock, because the locking order of the +pi_lock and wait_lock goes in the opposite direction. If we fail to grab the +lock, the pi_lock is released, and we restart the loop. + +Now that we have both the pi_lock of the task as well as the wait_lock of +the mutex the task is blocked on, we update the task's waiter's plist node +that is located on the mutex's wait_list. + +Now we release the pi_lock of the task. + +Next the owner of the mutex has its pi_lock taken, so we can update the +task's entry in the owner's pi_list. If the task is the highest priority +process on the mutex's wait_list, then we remove the previous top waiter +from the owner's pi_list, and replace it with the task. + +Note: It is possible that the task was the current top waiter on the mutex, + in which case the task is not yet on the pi_list of the waiter. This + is OK, since plist_del does nothing if the plist node is not on any + list. + +If the task was not the top waiter of the mutex, but it was before we +did the priority updates, that means we are deboosting/lowering the +task. In this case, the task is removed from the pi_list of the owner, +and the new top waiter is added. + +Lastly, we unlock both the pi_lock of the task, as well as the mutex's +wait_lock, and continue the loop again. On the next iteration of the +loop, the previous owner of the mutex will be the task that will be +processed. 
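+
+To summarize the loop described above, here is a rough skeleton that shows
+only the locking order. This is not the real rt_mutex_adjust_prio_chain code
+in rtmutex.c; the helper names are made up purely for illustration, and the
+early-exit checks described above are reduced to comments:
+
+ again:
+	spin_lock(&task->pi_lock);
+
+	if (!task_is_blocked(task)) {
+		/* top of the PI chain, nothing more to adjust */
+		spin_unlock(&task->pi_lock);
+		return;
+	}
+
+	/* (the checks for "not the top pi waiter" and "priority already
+	 *  correct" would exit the loop here) */
+
+	lock = lock_the_task_is_blocked_on(task);
+	if (!spin_trylock(&lock->wait_lock)) {
+		/* opposite lock order, back off and try again */
+		spin_unlock(&task->pi_lock);
+		goto again;
+	}
+
+	/* requeue the task's waiter on lock->wait_list with the new priority */
+
+	spin_unlock(&task->pi_lock);
+
+	owner = owner_of(lock);
+	spin_lock(&owner->pi_lock);
+	/* update owner->pi_list and readjust the owner's priority */
+	spin_unlock(&owner->pi_lock);
+	spin_unlock(&lock->wait_lock);
+
+	/* walk up the chain: the owner is processed on the next iteration */
+	task = owner;
+	goto again;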
+ +Note: One might think that the owner of this mutex might have changed + since we just grab the mutex's wait_lock. And one could be right. + The important thing to remember is that the owner could not have + become the task that is being processed in the PI chain, since + we have taken that task's pi_lock at the beginning of the loop. + So as long as there is an owner of this mutex that is not the same + process as the tasked being worked on, we are OK. + + Looking closely at the code, one might be confused. The check for the + end of the PI chain is when the task isn't blocked on anything or the + task's waiter structure "task" element is NULL. This check is + protected only by the task's pi_lock. But the code to unlock the mutex + sets the task's waiter structure "task" element to NULL with only + the protection of the mutex's wait_lock, which was not taken yet. + Isn't this a race condition if the task becomes the new owner? + + The answer is No! The trick is the spin_trylock of the mutex's + wait_lock. If we fail that lock, we release the pi_lock of the + task and continue the loop, doing the end of PI chain check again. + + In the code to release the lock, the wait_lock of the mutex is held + the entire time, and it is not let go when we grab the pi_lock of the + new owner of the mutex. So if the switch of a new owner were to happen + after the check for end of the PI chain and the grabbing of the + wait_lock, the unlocking code would spin on the new owner's pi_lock + but never give up the wait_lock. So the PI chain loop is guaranteed to + fail the spin_trylock on the wait_lock, release the pi_lock, and + try again. + + If you don't quite understand the above, that's OK. You don't have to, + unless you really want to make a proof out of it ;) + + +Pending Owners and Lock stealing +-------------------------------- + +One of the flags in the owner field of the mutex structure is "Pending Owner". +What this means is that an owner was chosen by the process releasing the +mutex, but that owner has yet to wake up and actually take the mutex. + +Why is this important? Why can't we just give the mutex to another process +and be done with it? + +The PI code is to help with real-time processes, and to let the highest +priority process run as long as possible with little latencies and delays. +If a high priority process owns a mutex that a lower priority process is +blocked on, when the mutex is released it would be given to the lower priority +process. What if the higher priority process wants to take that mutex again. +The high priority process would fail to take that mutex that it just gave up +and it would need to boost the lower priority process to run with full +latency of that critical section (since the low priority process just entered +it). + +There's no reason a high priority process that gives up a mutex should be +penalized if it tries to take that mutex again. If the new owner of the +mutex has not woken up yet, there's no reason that the higher priority process +could not take that mutex away. + +To solve this, we introduced Pending Ownership and Lock Stealing. When a +new process is given a mutex that it was blocked on, it is only given +pending ownership. This means that it's the new owner, unless a higher +priority process comes in and tries to grab that mutex. If a higher priority +process does come along and wants that mutex, we let the higher priority +process "steal" the mutex from the pending owner (only if it is still pending) +and continue with the mutex. 
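+
+Because the prio value in the task structure is inverted (a lower number
+means a higher priority, as noted earlier), the stealing rule boils down to a
+simple comparison. The helper below is only an illustration of that rule, not
+actual kernel code:
+
+/*
+ * May "thief" take the mutex away from the current pending owner?
+ * Only a task of strictly higher priority (i.e. with a smaller prio
+ * number) is allowed to steal a pending mutex.
+ */
+static int can_steal_mutex(int thief_prio, int pending_owner_prio)
+{
+	return thief_prio < pending_owner_prio;
+}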
+ + +Taking of a mutex (The walk through) +------------------------------------ + +OK, now let's take a look at the detailed walk through of what happens when +taking a mutex. + +The first thing that is tried is the fast taking of the mutex. This is +done when we have CMPXCHG enabled (otherwise the fast taking automatically +fails). Only when the owner field of the mutex is NULL can the lock be +taken with the CMPXCHG and nothing else needs to be done. + +If there is contention on the lock, whether it is owned or pending owner +we go about the slow path (rt_mutex_slowlock). + +The slow path function is where the task's waiter structure is created on +the stack. This is because the waiter structure is only needed for the +scope of this function. The waiter structure holds the nodes to store +the task on the wait_list of the mutex, and if need be, the pi_list of +the owner. + +The wait_lock of the mutex is taken since the slow path of unlocking the +mutex also takes this lock. + +We then call try_to_take_rt_mutex. This is where the architecture that +does not implement CMPXCHG would always grab the lock (if there's no +contention). + +try_to_take_rt_mutex is used every time the task tries to grab a mutex in the +slow path. The first thing that is done here is an atomic setting of +the "Has Waiters" flag of the mutex's owner field. Yes, this could really +be false, because if the mutex has no owner, there are no waiters and +the current task also won't have any waiters. But we don't have the lock +yet, so we assume we are going to be a waiter. The reason for this is to +play nice for those architectures that do have CMPXCHG. By setting this flag +now, the owner of the mutex can't release the mutex without going into the +slow unlock path, and it would then need to grab the wait_lock, which this +code currently holds. So setting the "Has Waiters" flag forces the owner +to synchronize with this code. + +Now that we know that we can't have any races with the owner releasing the +mutex, we check to see if we can take the ownership. This is done if the +mutex doesn't have a owner, or if we can steal the mutex from a pending +owner. Let's look at the situations we have here. + + 1) Has owner that is pending + ---------------------------- + + The mutex has a owner, but it hasn't woken up and the mutex flag + "Pending Owner" is set. The first check is to see if the owner isn't the + current task. This is because this function is also used for the pending + owner to grab the mutex. When a pending owner wakes up, it checks to see + if it can take the mutex, and this is done if the owner is already set to + itself. If so, we succeed and leave the function, clearing the "Pending + Owner" bit. + + If the pending owner is not current, we check to see if the current priority is + higher than the pending owner. If not, we fail the function and return. + + There's also something special about a pending owner. That is a pending owner + is never blocked on a mutex. So there is no PI chain to worry about. It also + means that if the mutex doesn't have any waiters, there's no accounting needed + to update the pending owner's pi_list, since we only worry about processes + blocked on the current mutex. + + If there are waiters on this mutex, and we just stole the ownership, we need + to take the top waiter, remove it from the pi_list of the pending owner, and + add it to the current pi_list. Note that at this moment, the pending owner + is no longer on the list of waiters. 
+ This is fine, since the pending owner
+ would add itself back when it realizes that it had the ownership stolen
+ from itself. When the pending owner tries to grab the mutex, it will fail
+ in try_to_take_rt_mutex if the owner field points to another process.
+
+ 2) No owner
+ -----------
+
+ If there is no owner (or we successfully stole the lock), we set the owner
+ of the mutex to current, and set the flag of "Has Waiters" if the current
+ mutex actually has waiters, or we clear the flag if it doesn't. See, it was
+ OK that we set that flag early, since now it is cleared.
+
+ 3) Failed to grab ownership
+ ---------------------------
+
+ The most interesting case is when we fail to take ownership. This means that
+ there exists an owner, or there's a pending owner with equal or higher
+ priority than the current task.
+
+We'll continue on the failed case.
+
+If the mutex has a timeout, we set up a timer to go off to break us out
+of the wait if we fail to get the mutex within the specified amount of time.
+
+Now we enter a loop that will continue to try to take ownership of the mutex,
+or fail from a timeout or signal.
+
+Once again we try to take the mutex. This will usually fail the first time
+in the loop, since it had just failed to get the mutex. But the second time
+in the loop, this would likely succeed, since the task would likely be
+the pending owner.
+
+If the task's state is TASK_INTERRUPTIBLE, a check for signals and timeout
+is done here.
+
+The waiter structure has a "task" field that points to the task that is blocked
+on the mutex. This field can be NULL the first time it goes through the loop
+or if the task is a pending owner and had its mutex stolen. If the "task"
+field is NULL then we need to set up the accounting for it.
+
+Task blocks on mutex
+--------------------
+
+The accounting of a mutex and process is done with the waiter structure of
+the process. The "task" field is set to the process, and the "lock" field
+to the mutex. The plist nodes are initialized to the process's current
+priority.
+
+Since the wait_lock was taken at the entry of the slow lock, we can safely
+add the waiter to the wait_list. If the current process is the highest
+priority process currently waiting on this mutex, then we remove the
+previous top waiter process (if it exists) from the pi_list of the owner,
+and add the current process to that list. Since the pi_list of the owner
+has changed, we call rt_mutex_adjust_prio on the owner to see if the owner
+should adjust its priority accordingly.
+
+If the owner is also blocked on a lock, and had its pi_list changed
+(or deadlock checking is on), we unlock the wait_lock of the mutex and go ahead
+and run rt_mutex_adjust_prio_chain on the owner, as described earlier.
+
+Now all locks are released, and if the current process is still blocked on a
+mutex (waiter "task" field is not NULL), then we go to sleep (call schedule).
+
+Waking up in the loop
+---------------------
+
+The task can then wake up from schedule for a few reasons:
+ 1) we were given pending ownership of the mutex.
+ 2) we received a signal and were TASK_INTERRUPTIBLE
+ 3) we had a timeout and were TASK_INTERRUPTIBLE
+
+In any of these cases, we continue the loop and once again try to grab the
+ownership of the mutex. If we succeed, we exit the loop. Otherwise we
+continue: on a signal or a timeout we exit the loop, and if the mutex was
+stolen from us we simply add ourselves back on the lists and go back to sleep.
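+
+The wait loop just described has roughly the following shape. This is only an
+outline of the slow lock path; the helpers for the signal/timeout check and
+for the waiter setup are simplified stand-ins for what the real code does:
+
+	for (;;) {
+		/* also succeeds if we are the pending owner and
+		 * nobody has stolen the mutex from us */
+		if (try_to_take_rt_mutex(lock))
+			break;
+
+		if (state == TASK_INTERRUPTIBLE &&
+		    got_signal_or_timed_out()) {
+			/* give up; the waiter gets cleaned up below */
+			break;
+		}
+
+		if (waiter.task == NULL) {
+			/* first iteration, or our ownership was stolen:
+			 * (re)add ourselves to the mutex's wait_list and,
+			 * if we are the new top waiter, to the owner's
+			 * pi_list */
+			block_on_this_mutex(lock, &waiter);
+		}
+
+		/* still blocked, sleep until we are woken up */
+		schedule();
+	}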
+ +Note: For various reasons, because of timeout and signals, the steal mutex + algorithm needs to be careful. This is because the current process is + still on the wait_list. And because of dynamic changing of priorities, + especially on SCHED_OTHER tasks, the current process can be the + highest priority task on the wait_list. + +Failed to get mutex on Timeout or Signal +---------------------------------------- + +If a timeout or signal occurred, the waiter's "task" field would not be +NULL and the task needs to be taken off the wait_list of the mutex and perhaps +pi_list of the owner. If this process was a high priority process, then +the rt_mutex_adjust_prio_chain needs to be executed again on the owner, +but this time it will be lowering the priorities. + + +Unlocking the Mutex +------------------- + +The unlocking of a mutex also has a fast path for those architectures with +CMPXCHG. Since the taking of a mutex on contention always sets the +"Has Waiters" flag of the mutex's owner, we use this to know if we need to +take the slow path when unlocking the mutex. If the mutex doesn't have any +waiters, the owner field of the mutex would equal the current process and +the mutex can be unlocked by just replacing the owner field with NULL. + +If the owner field has the "Has Waiters" bit set (or CMPXCHG is not available), +the slow unlock path is taken. + +The first thing done in the slow unlock path is to take the wait_lock of the +mutex. This synchronizes the locking and unlocking of the mutex. + +A check is made to see if the mutex has waiters or not. On architectures that +do not have CMPXCHG, this is the location that the owner of the mutex will +determine if a waiter needs to be awoken or not. On architectures that +do have CMPXCHG, that check is done in the fast path, but it is still needed +in the slow path too. If a waiter of a mutex woke up because of a signal +or timeout between the time the owner failed the fast path CMPXCHG check and +the grabbing of the wait_lock, the mutex may not have any waiters, thus the +owner still needs to make this check. If there are no waiters then the mutex +owner field is set to NULL, the wait_lock is released and nothing more is +needed. + +If there are waiters, then we need to wake one up and give that waiter +pending ownership. + +On the wake up code, the pi_lock of the current owner is taken. The top +waiter of the lock is found and removed from the wait_list of the mutex +as well as the pi_list of the current owner. The task field of the new +pending owner's waiter structure is set to NULL, and the owner field of the +mutex is set to the new owner with the "Pending Owner" bit set, as well +as the "Has Waiters" bit if there still are other processes blocked on the +mutex. + +The pi_lock of the previous owner is released, and the new pending owner's +pi_lock is taken. Remember that this is the trick to prevent the race +condition in rt_mutex_adjust_prio_chain from adding itself as a waiter +on the mutex. + +We now clear the "pi_blocked_on" field of the new pending owner, and if +the mutex still has waiters pending, we add the new top waiter to the pi_list +of the pending owner. + +Finally we unlock the pi_lock of the pending owner and wake it up. 
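+
+Putting the fast path together, the unlock side can be pictured as a single
+compare and exchange on the owner field. The sketch below uses the
+conventional cmpxchg(ptr, old, new) form rather than the simplified pseudo
+macro from the cmpxchg section, and the slow path function name is only
+illustrative:
+
+	/*
+	 * Fast unlock: only possible when we are the owner and neither
+	 * the "Has Waiters" nor the "Pending Owner" bit is set, i.e. the
+	 * owner field is exactly the current task pointer.
+	 */
+	if (cmpxchg(&lock->owner, current, NULL) != current)
+		/* a flag bit was set (or CMPXCHG is not available):
+		 * take the slow unlock path, which starts by taking
+		 * the wait_lock */
+		slow_unlock_this_mutex(lock);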
+ + +Contact +------- + +For updates on this document, please email Steven Rostedt + + +Credits +------- + +Author: Steven Rostedt + +Reviewers: Ingo Molnar, Thomas Gleixner, Thomas Duetsch, and Randy Dunlap + +Updates +------- + +This document was originally written for 2.6.17-rc3-mm1 diff --git a/Documentation/locking/rt-mutex.txt b/Documentation/locking/rt-mutex.txt new file mode 100644 index 000000000000..243393d882ee --- /dev/null +++ b/Documentation/locking/rt-mutex.txt @@ -0,0 +1,79 @@ +RT-mutex subsystem with PI support +---------------------------------- + +RT-mutexes with priority inheritance are used to support PI-futexes, +which enable pthread_mutex_t priority inheritance attributes +(PTHREAD_PRIO_INHERIT). [See Documentation/pi-futex.txt for more details +about PI-futexes.] + +This technology was developed in the -rt tree and streamlined for +pthread_mutex support. + +Basic principles: +----------------- + +RT-mutexes extend the semantics of simple mutexes by the priority +inheritance protocol. + +A low priority owner of a rt-mutex inherits the priority of a higher +priority waiter until the rt-mutex is released. If the temporarily +boosted owner blocks on a rt-mutex itself it propagates the priority +boosting to the owner of the other rt_mutex it gets blocked on. The +priority boosting is immediately removed once the rt_mutex has been +unlocked. + +This approach allows us to shorten the block of high-prio tasks on +mutexes which protect shared resources. Priority inheritance is not a +magic bullet for poorly designed applications, but it allows +well-designed applications to use userspace locks in critical parts of +an high priority thread, without losing determinism. + +The enqueueing of the waiters into the rtmutex waiter list is done in +priority order. For same priorities FIFO order is chosen. For each +rtmutex, only the top priority waiter is enqueued into the owner's +priority waiters list. This list too queues in priority order. Whenever +the top priority waiter of a task changes (for example it timed out or +got a signal), the priority of the owner task is readjusted. [The +priority enqueueing is handled by "plists", see include/linux/plist.h +for more details.] + +RT-mutexes are optimized for fastpath operations and have no internal +locking overhead when locking an uncontended mutex or unlocking a mutex +without waiters. The optimized fastpath operations require cmpxchg +support. [If that is not available then the rt-mutex internal spinlock +is used] + +The state of the rt-mutex is tracked via the owner field of the rt-mutex +structure: + +rt_mutex->owner holds the task_struct pointer of the owner. Bit 0 and 1 +are used to keep track of the "owner is pending" and "rtmutex has +waiters" state. + + owner bit1 bit0 + NULL 0 0 mutex is free (fast acquire possible) + NULL 0 1 invalid state + NULL 1 0 Transitional state* + NULL 1 1 invalid state + taskpointer 0 0 mutex is held (fast release possible) + taskpointer 0 1 task is pending owner + taskpointer 1 0 mutex is held and has waiters + taskpointer 1 1 task is pending owner and mutex has waiters + +Pending-ownership handling is a performance optimization: +pending-ownership is assigned to the first (highest priority) waiter of +the mutex, when the mutex is released. The thread is woken up and once +it starts executing it can acquire the mutex. Until the mutex is taken +by it (bit 0 is cleared) a competing higher priority thread can "steal" +the mutex which puts the woken up thread back on the waiters list. 
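+
+In other words, the owner field packs two status bits into the low bits of
+the (at least 4-byte aligned) task_struct pointer. The fragment below is a
+hypothetical illustration of such an encoding; the macro and function names
+are made up and may not match the kernel's:
+
+#define OWNER_PENDING		0x1UL	/* bit 0: task is pending owner */
+#define OWNER_HAS_WAITERS	0x2UL	/* bit 1: rtmutex has waiters   */
+
+static inline struct task_struct *owner_task(unsigned long owner_field)
+{
+	/* mask off both status bits to recover the task pointer */
+	return (struct task_struct *)
+		(owner_field & ~(OWNER_PENDING | OWNER_HAS_WAITERS));
+}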
+ +The pending-ownership optimization is especially important for the +uninterrupted workflow of high-prio tasks which repeatedly +takes/releases locks that have lower-prio waiters. Without this +optimization the higher-prio thread would ping-pong to the lower-prio +task [because at unlock time we always assign a new owner]. + +(*) The "mutex has waiters" bit gets set to take the lock. If the lock +doesn't already have an owner, this bit is quickly cleared if there are +no waiters. So this is a transitional state to synchronize with looking +at the owner field of the mutex and the mutex owner releasing the lock. diff --git a/Documentation/locking/spinlocks.txt b/Documentation/locking/spinlocks.txt new file mode 100644 index 000000000000..ff35e40bdf5b --- /dev/null +++ b/Documentation/locking/spinlocks.txt @@ -0,0 +1,167 @@ +Lesson 1: Spin locks + +The most basic primitive for locking is spinlock. + +static DEFINE_SPINLOCK(xxx_lock); + + unsigned long flags; + + spin_lock_irqsave(&xxx_lock, flags); + ... critical section here .. + spin_unlock_irqrestore(&xxx_lock, flags); + +The above is always safe. It will disable interrupts _locally_, but the +spinlock itself will guarantee the global lock, so it will guarantee that +there is only one thread-of-control within the region(s) protected by that +lock. This works well even under UP also, so the code does _not_ need to +worry about UP vs SMP issues: the spinlocks work correctly under both. + + NOTE! Implications of spin_locks for memory are further described in: + + Documentation/memory-barriers.txt + (5) LOCK operations. + (6) UNLOCK operations. + +The above is usually pretty simple (you usually need and want only one +spinlock for most things - using more than one spinlock can make things a +lot more complex and even slower and is usually worth it only for +sequences that you _know_ need to be split up: avoid it at all cost if you +aren't sure). + +This is really the only really hard part about spinlocks: once you start +using spinlocks they tend to expand to areas you might not have noticed +before, because you have to make sure the spinlocks correctly protect the +shared data structures _everywhere_ they are used. The spinlocks are most +easily added to places that are completely independent of other code (for +example, internal driver data structures that nobody else ever touches). + + NOTE! The spin-lock is safe only when you _also_ use the lock itself + to do locking across CPU's, which implies that EVERYTHING that + touches a shared variable has to agree about the spinlock they want + to use. + +---- + +Lesson 2: reader-writer spinlocks. + +If your data accesses have a very natural pattern where you usually tend +to mostly read from the shared variables, the reader-writer locks +(rw_lock) versions of the spinlocks are sometimes useful. They allow multiple +readers to be in the same critical region at once, but if somebody wants +to change the variables it has to get an exclusive write lock. + + NOTE! reader-writer locks require more atomic memory operations than + simple spinlocks. Unless the reader critical section is long, you + are better off just using spinlocks. + +The routines look the same as above: + + rwlock_t xxx_lock = __RW_LOCK_UNLOCKED(xxx_lock); + + unsigned long flags; + + read_lock_irqsave(&xxx_lock, flags); + .. critical section that only reads the info ... + read_unlock_irqrestore(&xxx_lock, flags); + + write_lock_irqsave(&xxx_lock, flags); + .. read and write exclusive access to the info ... 
+ write_unlock_irqrestore(&xxx_lock, flags); + +The above kind of lock may be useful for complex data structures like +linked lists, especially searching for entries without changing the list +itself. The read lock allows many concurrent readers. Anything that +_changes_ the list will have to get the write lock. + + NOTE! RCU is better for list traversal, but requires careful + attention to design detail (see Documentation/RCU/listRCU.txt). + +Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_ +time need to do any changes (even if you don't do it every time), you have +to get the write-lock at the very beginning. + + NOTE! We are working hard to remove reader-writer spinlocks in most + cases, so please don't add a new one without consensus. (Instead, see + Documentation/RCU/rcu.txt for complete information.) + +---- + +Lesson 3: spinlocks revisited. + +The single spin-lock primitives above are by no means the only ones. They +are the most safe ones, and the ones that work under all circumstances, +but partly _because_ they are safe they are also fairly slow. They are slower +than they'd need to be, because they do have to disable interrupts +(which is just a single instruction on a x86, but it's an expensive one - +and on other architectures it can be worse). + +If you have a case where you have to protect a data structure across +several CPU's and you want to use spinlocks you can potentially use +cheaper versions of the spinlocks. IFF you know that the spinlocks are +never used in interrupt handlers, you can use the non-irq versions: + + spin_lock(&lock); + ... + spin_unlock(&lock); + +(and the equivalent read-write versions too, of course). The spinlock will +guarantee the same kind of exclusive access, and it will be much faster. +This is useful if you know that the data in question is only ever +manipulated from a "process context", ie no interrupts involved. + +The reasons you mustn't use these versions if you have interrupts that +play with the spinlock is that you can get deadlocks: + + spin_lock(&lock); + ... + <- interrupt comes in: + spin_lock(&lock); + +where an interrupt tries to lock an already locked variable. This is ok if +the other interrupt happens on another CPU, but it is _not_ ok if the +interrupt happens on the same CPU that already holds the lock, because the +lock will obviously never be released (because the interrupt is waiting +for the lock, and the lock-holder is interrupted by the interrupt and will +not continue until the interrupt has been processed). + +(This is also the reason why the irq-versions of the spinlocks only need +to disable the _local_ interrupts - it's ok to use spinlocks in interrupts +on other CPU's, because an interrupt on another CPU doesn't interrupt the +CPU that holds the lock, so the lock-holder can continue and eventually +releases the lock). + +Note that you can be clever with read-write locks and interrupts. For +example, if you know that the interrupt only ever gets a read-lock, then +you can use a non-irq version of read locks everywhere - because they +don't block on each other (and thus there is no dead-lock wrt interrupts. +But when you do the write-lock, you have to use the irq-safe version. + +For an example of being clever with rw-locks, see the "waitqueue_lock" +handling in kernel/sched/core.c - nothing ever _changes_ a wait-queue from +within an interrupt, they only read the queue in order to know whom to +wake up. 
So read-locks are safe (which is good: they are very common +indeed), while write-locks need to protect themselves against interrupts. + + Linus + +---- + +Reference information: + +For dynamic initialization, use spin_lock_init() or rwlock_init() as +appropriate: + + spinlock_t xxx_lock; + rwlock_t xxx_rw_lock; + + static int __init xxx_init(void) + { + spin_lock_init(&xxx_lock); + rwlock_init(&xxx_rw_lock); + ... + } + + module_init(xxx_init); + +For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or +__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate. diff --git a/Documentation/locking/ww-mutex-design.txt b/Documentation/locking/ww-mutex-design.txt new file mode 100644 index 000000000000..8a112dc304c3 --- /dev/null +++ b/Documentation/locking/ww-mutex-design.txt @@ -0,0 +1,344 @@ +Wait/Wound Deadlock-Proof Mutex Design +====================================== + +Please read mutex-design.txt first, as it applies to wait/wound mutexes too. + +Motivation for WW-Mutexes +------------------------- + +GPU's do operations that commonly involve many buffers. Those buffers +can be shared across contexts/processes, exist in different memory +domains (for example VRAM vs system memory), and so on. And with +PRIME / dmabuf, they can even be shared across devices. So there are +a handful of situations where the driver needs to wait for buffers to +become ready. If you think about this in terms of waiting on a buffer +mutex for it to become available, this presents a problem because +there is no way to guarantee that buffers appear in a execbuf/batch in +the same order in all contexts. That is directly under control of +userspace, and a result of the sequence of GL calls that an application +makes. Which results in the potential for deadlock. The problem gets +more complex when you consider that the kernel may need to migrate the +buffer(s) into VRAM before the GPU operates on the buffer(s), which +may in turn require evicting some other buffers (and you don't want to +evict other buffers which are already queued up to the GPU), but for a +simplified understanding of the problem you can ignore this. + +The algorithm that the TTM graphics subsystem came up with for dealing with +this problem is quite simple. For each group of buffers (execbuf) that need +to be locked, the caller would be assigned a unique reservation id/ticket, +from a global counter. In case of deadlock while locking all the buffers +associated with a execbuf, the one with the lowest reservation ticket (i.e. +the oldest task) wins, and the one with the higher reservation id (i.e. the +younger task) unlocks all of the buffers that it has already locked, and then +tries again. + +In the RDBMS literature this deadlock handling approach is called wait/wound: +The older tasks waits until it can acquire the contended lock. The younger tasks +needs to back off and drop all the locks it is currently holding, i.e. the +younger task is wounded. + +Concepts +-------- + +Compared to normal mutexes two additional concepts/objects show up in the lock +interface for w/w mutexes: + +Acquire context: To ensure eventual forward progress it is important the a task +trying to acquire locks doesn't grab a new reservation id, but keeps the one it +acquired when starting the lock acquisition. This ticket is stored in the +acquire context. Furthermore the acquire context keeps track of debugging state +to catch w/w mutex interface abuse. 
+ +W/w class: In contrast to normal mutexes the lock class needs to be explicit for +w/w mutexes, since it is required to initialize the acquire context. + +Furthermore there are three different class of w/w lock acquire functions: + +* Normal lock acquisition with a context, using ww_mutex_lock. + +* Slowpath lock acquisition on the contending lock, used by the wounded task + after having dropped all already acquired locks. These functions have the + _slow postfix. + + From a simple semantics point-of-view the _slow functions are not strictly + required, since simply calling the normal ww_mutex_lock functions on the + contending lock (after having dropped all other already acquired locks) will + work correctly. After all if no other ww mutex has been acquired yet there's + no deadlock potential and hence the ww_mutex_lock call will block and not + prematurely return -EDEADLK. The advantage of the _slow functions is in + interface safety: + - ww_mutex_lock has a __must_check int return type, whereas ww_mutex_lock_slow + has a void return type. Note that since ww mutex code needs loops/retries + anyway the __must_check doesn't result in spurious warnings, even though the + very first lock operation can never fail. + - When full debugging is enabled ww_mutex_lock_slow checks that all acquired + ww mutex have been released (preventing deadlocks) and makes sure that we + block on the contending lock (preventing spinning through the -EDEADLK + slowpath until the contended lock can be acquired). + +* Functions to only acquire a single w/w mutex, which results in the exact same + semantics as a normal mutex. This is done by calling ww_mutex_lock with a NULL + context. + + Again this is not strictly required. But often you only want to acquire a + single lock in which case it's pointless to set up an acquire context (and so + better to avoid grabbing a deadlock avoidance ticket). + +Of course, all the usual variants for handling wake-ups due to signals are also +provided. + +Usage +----- + +Three different ways to acquire locks within the same w/w class. Common +definitions for methods #1 and #2: + +static DEFINE_WW_CLASS(ww_class); + +struct obj { + struct ww_mutex lock; + /* obj data */ +}; + +struct obj_entry { + struct list_head head; + struct obj *obj; +}; + +Method 1, using a list in execbuf->buffers that's not allowed to be reordered. +This is useful if a list of required objects is already tracked somewhere. +Furthermore the lock helper can use propagate the -EALREADY return code back to +the caller as a signal that an object is twice on the list. This is useful if +the list is constructed from userspace input and the ABI requires userspace to +not have duplicate entries (e.g. for a gpu commandbuffer submission ioctl). + +int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) +{ + struct obj *res_obj = NULL; + struct obj_entry *contended_entry = NULL; + struct obj_entry *entry; + + ww_acquire_init(ctx, &ww_class); + +retry: + list_for_each_entry (entry, list, head) { + if (entry->obj == res_obj) { + res_obj = NULL; + continue; + } + ret = ww_mutex_lock(&entry->obj->lock, ctx); + if (ret < 0) { + contended_entry = entry; + goto err; + } + } + + ww_acquire_done(ctx); + return 0; + +err: + list_for_each_entry_continue_reverse (entry, list, head) + ww_mutex_unlock(&entry->obj->lock); + + if (res_obj) + ww_mutex_unlock(&res_obj->lock); + + if (ret == -EDEADLK) { + /* we lost out in a seqno race, lock and retry.. 
*/ + ww_mutex_lock_slow(&contended_entry->obj->lock, ctx); + res_obj = contended_entry->obj; + goto retry; + } + ww_acquire_fini(ctx); + + return ret; +} + +Method 2, using a list in execbuf->buffers that can be reordered. Same semantics +of duplicate entry detection using -EALREADY as method 1 above. But the +list-reordering allows for a bit more idiomatic code. + +int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) +{ + struct obj_entry *entry, *entry2; + + ww_acquire_init(ctx, &ww_class); + + list_for_each_entry (entry, list, head) { + ret = ww_mutex_lock(&entry->obj->lock, ctx); + if (ret < 0) { + entry2 = entry; + + list_for_each_entry_continue_reverse (entry2, list, head) + ww_mutex_unlock(&entry2->obj->lock); + + if (ret != -EDEADLK) { + ww_acquire_fini(ctx); + return ret; + } + + /* we lost out in a seqno race, lock and retry.. */ + ww_mutex_lock_slow(&entry->obj->lock, ctx); + + /* + * Move buf to head of the list, this will point + * buf->next to the first unlocked entry, + * restarting the for loop. + */ + list_del(&entry->head); + list_add(&entry->head, list); + } + } + + ww_acquire_done(ctx); + return 0; +} + +Unlocking works the same way for both methods #1 and #2: + +void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) +{ + struct obj_entry *entry; + + list_for_each_entry (entry, list, head) + ww_mutex_unlock(&entry->obj->lock); + + ww_acquire_fini(ctx); +} + +Method 3 is useful if the list of objects is constructed ad-hoc and not upfront, +e.g. when adjusting edges in a graph where each node has its own ww_mutex lock, +and edges can only be changed when holding the locks of all involved nodes. w/w +mutexes are a natural fit for such a case for two reasons: +- They can handle lock-acquisition in any order which allows us to start walking + a graph from a starting point and then iteratively discovering new edges and + locking down the nodes those edges connect to. +- Due to the -EALREADY return code signalling that a given objects is already + held there's no need for additional book-keeping to break cycles in the graph + or keep track off which looks are already held (when using more than one node + as a starting point). + +Note that this approach differs in two important ways from the above methods: +- Since the list of objects is dynamically constructed (and might very well be + different when retrying due to hitting the -EDEADLK wound condition) there's + no need to keep any object on a persistent list when it's not locked. We can + therefore move the list_head into the object itself. +- On the other hand the dynamic object list construction also means that the -EALREADY return + code can't be propagated. + +Note also that methods #1 and #2 and method #3 can be combined, e.g. to first lock a +list of starting nodes (passed in from userspace) using one of the above +methods. And then lock any additional objects affected by the operations using +method #3 below. The backoff/retry procedure will be a bit more involved, since +when the dynamic locking step hits -EDEADLK we also need to unlock all the +objects acquired with the fixed list. But the w/w mutex debug checks will catch +any interface misuse for these cases. + +Also, method 3 can't fail the lock acquisition step since it doesn't return +-EALREADY. Of course this would be different when using the _interruptible +variants, but that's outside of the scope of these examples here. 
+ +struct obj { + struct ww_mutex ww_mutex; + struct list_head locked_list; +}; + +static DEFINE_WW_CLASS(ww_class); + +void __unlock_objs(struct list_head *list) +{ + struct obj *entry, *temp; + + list_for_each_entry_safe (entry, temp, list, locked_list) { + /* need to do that before unlocking, since only the current lock holder is + allowed to use object */ + list_del(&entry->locked_list); + ww_mutex_unlock(entry->ww_mutex) + } +} + +void lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) +{ + struct obj *obj; + + ww_acquire_init(ctx, &ww_class); + +retry: + /* re-init loop start state */ + loop { + /* magic code which walks over a graph and decides which objects + * to lock */ + + ret = ww_mutex_lock(obj->ww_mutex, ctx); + if (ret == -EALREADY) { + /* we have that one already, get to the next object */ + continue; + } + if (ret == -EDEADLK) { + __unlock_objs(list); + + ww_mutex_lock_slow(obj, ctx); + list_add(&entry->locked_list, list); + goto retry; + } + + /* locked a new object, add it to the list */ + list_add_tail(&entry->locked_list, list); + } + + ww_acquire_done(ctx); + return 0; +} + +void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) +{ + __unlock_objs(list); + ww_acquire_fini(ctx); +} + +Method 4: Only lock one single objects. In that case deadlock detection and +prevention is obviously overkill, since with grabbing just one lock you can't +produce a deadlock within just one class. To simplify this case the w/w mutex +api can be used with a NULL context. + +Implementation Details +---------------------- + +Design: + ww_mutex currently encapsulates a struct mutex, this means no extra overhead for + normal mutex locks, which are far more common. As such there is only a small + increase in code size if wait/wound mutexes are not used. + + In general, not much contention is expected. The locks are typically used to + serialize access to resources for devices. The only way to make wakeups + smarter would be at the cost of adding a field to struct mutex_waiter. This + would add overhead to all cases where normal mutexes are used, and + ww_mutexes are generally less performance sensitive. + +Lockdep: + Special care has been taken to warn for as many cases of api abuse + as possible. Some common api abuses will be caught with + CONFIG_DEBUG_MUTEXES, but CONFIG_PROVE_LOCKING is recommended. + + Some of the errors which will be warned about: + - Forgetting to call ww_acquire_fini or ww_acquire_init. + - Attempting to lock more mutexes after ww_acquire_done. + - Attempting to lock the wrong mutex after -EDEADLK and + unlocking all mutexes. + - Attempting to lock the right mutex after -EDEADLK, + before unlocking all mutexes. + + - Calling ww_mutex_lock_slow before -EDEADLK was returned. + + - Unlocking mutexes with the wrong unlock function. + - Calling one of the ww_acquire_* twice on the same context. + - Using a different ww_class for the mutex than for the ww_acquire_ctx. + - Normal lockdep errors that can result in deadlocks. + + Some of the lockdep errors that can result in deadlocks: + - Calling ww_acquire_init to initialize a second ww_acquire_ctx before + having called ww_acquire_fini on the first. + - 'normal' deadlocks that can occur. + +FIXME: Update this section once we have the TASK_DEADLOCK task state flag magic +implemented. 
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt deleted file mode 100644 index 72d010689751..000000000000 --- a/Documentation/lockstat.txt +++ /dev/null @@ -1,178 +0,0 @@ - -LOCK STATISTICS - -- WHAT - -As the name suggests, it provides statistics on locks. - -- WHY - -Because things like lock contention can severely impact performance. - -- HOW - -Lockdep already has hooks in the lock functions and maps lock instances to -lock classes. We build on that (see Documentation/lockdep-design.txt). -The graph below shows the relation between the lock functions and the various -hooks therein. - - __acquire - | - lock _____ - | \ - | __contended - | | - | - | _______/ - |/ - | - __acquired - | - . - - . - | - __release - | - unlock - -lock, unlock - the regular lock functions -__* - the hooks -<> - states - -With these hooks we provide the following statistics: - - con-bounces - number of lock contention that involved x-cpu data - contentions - number of lock acquisitions that had to wait - wait time min - shortest (non-0) time we ever had to wait for a lock - max - longest time we ever had to wait for a lock - total - total time we spend waiting on this lock - avg - average time spent waiting on this lock - acq-bounces - number of lock acquisitions that involved x-cpu data - acquisitions - number of times we took the lock - hold time min - shortest (non-0) time we ever held the lock - max - longest time we ever held the lock - total - total time this lock was held - avg - average time this lock was held - -These numbers are gathered per lock class, per read/write state (when -applicable). - -It also tracks 4 contention points per class. A contention point is a call site -that had to wait on lock acquisition. - - - CONFIGURATION - -Lock statistics are enabled via CONFIG_LOCK_STAT. 
- - - USAGE - -Enable collection of statistics: - -# echo 1 >/proc/sys/kernel/lock_stat - -Disable collection of statistics: - -# echo 0 >/proc/sys/kernel/lock_stat - -Look at the current lock statistics: - -( line numbers not part of actual output, done for clarity in the explanation - below ) - -# less /proc/lock_stat - -01 lock_stat version 0.4 -02----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -03 class name con-bounces contentions waittime-min waittime-max waittime-total waittime-avg acq-bounces acquisitions holdtime-min holdtime-max holdtime-total holdtime-avg -04----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -05 -06 &mm->mmap_sem-W: 46 84 0.26 939.10 16371.53 194.90 47291 2922365 0.16 2220301.69 17464026916.32 5975.99 -07 &mm->mmap_sem-R: 37 100 1.31 299502.61 325629.52 3256.30 212344 34316685 0.10 7744.91 95016910.20 2.77 -08 --------------- -09 &mm->mmap_sem 1 [] khugepaged_scan_mm_slot+0x57/0x280 -19 &mm->mmap_sem 96 [] __do_page_fault+0x1d4/0x510 -11 &mm->mmap_sem 34 [] vm_mmap_pgoff+0x87/0xd0 -12 &mm->mmap_sem 17 [] vm_munmap+0x41/0x80 -13 --------------- -14 &mm->mmap_sem 1 [] dup_mmap+0x2a/0x3f0 -15 &mm->mmap_sem 60 [] SyS_mprotect+0xe9/0x250 -16 &mm->mmap_sem 41 [] __do_page_fault+0x1d4/0x510 -17 &mm->mmap_sem 68 [] vm_mmap_pgoff+0x87/0xd0 -18 -19............................................................................................................................................................................................................................. -20 -21 unix_table_lock: 110 112 0.21 49.24 163.91 1.46 21094 66312 0.12 624.42 31589.81 0.48 -22 --------------- -23 unix_table_lock 45 [] unix_create1+0x16e/0x1b0 -24 unix_table_lock 47 [] unix_release_sock+0x31/0x250 -25 unix_table_lock 15 [] unix_find_other+0x117/0x230 -26 unix_table_lock 5 [] unix_autobind+0x11f/0x1b0 -27 --------------- -28 unix_table_lock 39 [] unix_release_sock+0x31/0x250 -29 unix_table_lock 49 [] unix_create1+0x16e/0x1b0 -30 unix_table_lock 20 [] unix_find_other+0x117/0x230 -31 unix_table_lock 4 [] unix_autobind+0x11f/0x1b0 - - -This excerpt shows the first two lock class statistics. Line 01 shows the -output version - each time the format changes this will be updated. Line 02-04 -show the header with column descriptions. Lines 05-18 and 20-31 show the actual -statistics. These statistics come in two parts; the actual stats separated by a -short separator (line 08, 13) from the contention points. - -The first lock (05-18) is a read/write lock, and shows two lines above the -short separator. The contention points don't match the column descriptors, -they have two: contentions and [] symbol. The second set of contention -points are the points we're contending with. - -The integer part of the time values is in us. - -Dealing with nested locks, subclasses may appear: - -32........................................................................................................................................................................................................................... 
-33 -34 &rq->lock: 13128 13128 0.43 190.53 103881.26 7.91 97454 3453404 0.00 401.11 13224683.11 3.82 -35 --------- -36 &rq->lock 645 [] task_rq_lock+0x43/0x75 -37 &rq->lock 297 [] try_to_wake_up+0x127/0x25a -38 &rq->lock 360 [] select_task_rq_fair+0x1f0/0x74a -39 &rq->lock 428 [] scheduler_tick+0x46/0x1fb -40 --------- -41 &rq->lock 77 [] task_rq_lock+0x43/0x75 -42 &rq->lock 174 [] try_to_wake_up+0x127/0x25a -43 &rq->lock 4715 [] double_rq_lock+0x42/0x54 -44 &rq->lock 893 [] schedule+0x157/0x7b8 -45 -46........................................................................................................................................................................................................................... -47 -48 &rq->lock/1: 1526 11488 0.33 388.73 136294.31 11.86 21461 38404 0.00 37.93 109388.53 2.84 -49 ----------- -50 &rq->lock/1 11526 [] double_rq_lock+0x4f/0x54 -51 ----------- -52 &rq->lock/1 5645 [] double_rq_lock+0x42/0x54 -53 &rq->lock/1 1224 [] schedule+0x157/0x7b8 -54 &rq->lock/1 4336 [] double_rq_lock+0x4f/0x54 -55 &rq->lock/1 181 [] try_to_wake_up+0x127/0x25a - -Line 48 shows statistics for the second subclass (/1) of &rq->lock class -(subclass starts from 0), since in this case, as line 50 suggests, -double_rq_lock actually acquires a nested lock of two spinlocks. - -View the top contending locks: - -# grep : /proc/lock_stat | head - clockevents_lock: 2926159 2947636 0.15 46882.81 1784540466.34 605.41 3381345 3879161 0.00 2260.97 53178395.68 13.71 - tick_broadcast_lock: 346460 346717 0.18 2257.43 39364622.71 113.54 3642919 4242696 0.00 2263.79 49173646.60 11.59 - &mapping->i_mmap_mutex: 203896 203899 3.36 645530.05 31767507988.39 155800.21 3361776 8893984 0.17 2254.15 14110121.02 1.59 - &rq->lock: 135014 136909 0.18 606.09 842160.68 6.15 1540728 10436146 0.00 728.72 17606683.41 1.69 - &(&zone->lru_lock)->rlock: 93000 94934 0.16 59.18 188253.78 1.98 1199912 3809894 0.15 391.40 3559518.81 0.93 - tasklist_lock-W: 40667 41130 0.23 1189.42 428980.51 10.43 270278 510106 0.16 653.51 3939674.91 7.72 - tasklist_lock-R: 21298 21305 0.20 1310.05 215511.12 10.12 186204 241258 0.14 1162.33 1179779.23 4.89 - rcu_node_1: 47656 49022 0.16 635.41 193616.41 3.95 844888 1865423 0.00 764.26 1656226.96 0.89 - &(&dentry->d_lockref.lock)->rlock: 39791 40179 0.15 1302.08 88851.96 2.21 2790851 12527025 0.10 1910.75 3379714.27 0.27 - rcu_node_0: 29203 30064 0.16 786.55 1555573.00 51.74 88963 244254 0.00 398.87 428872.51 1.76 - -Clear the statistics: - -# echo 0 > /proc/lock_stat diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt deleted file mode 100644 index ee231ed09ec6..000000000000 --- a/Documentation/mutex-design.txt +++ /dev/null @@ -1,157 +0,0 @@ -Generic Mutex Subsystem - -started by Ingo Molnar -updated by Davidlohr Bueso - -What are mutexes? ------------------ - -In the Linux kernel, mutexes refer to a particular locking primitive -that enforces serialization on shared memory systems, and not only to -the generic term referring to 'mutual exclusion' found in academia -or similar theoretical text books. Mutexes are sleeping locks which -behave similarly to binary semaphores, and were introduced in 2006[1] -as an alternative to these. This new data structure provided a number -of advantages, including simpler interfaces, and at that time smaller -code (see Disadvantages). 
- -[1] http://lwn.net/Articles/164802/ - -Implementation --------------- - -Mutexes are represented by 'struct mutex', defined in include/linux/mutex.h -and implemented in kernel/locking/mutex.c. These locks use a three -state atomic counter (->count) to represent the different possible -transitions that can occur during the lifetime of a lock: - - 1: unlocked - 0: locked, no waiters - negative: locked, with potential waiters - -In its most basic form it also includes a wait-queue and a spinlock -that serializes access to it. CONFIG_SMP systems can also include -a pointer to the lock task owner (->owner) as well as a spinner MCS -lock (->osq), both described below in (ii). - -When acquiring a mutex, there are three possible paths that can be -taken, depending on the state of the lock: - -(i) fastpath: tries to atomically acquire the lock by decrementing the - counter. If it was already taken by another task it goes to the next - possible path. This logic is architecture specific. On x86-64, the - locking fastpath is 2 instructions: - - 0000000000000e10 : - e21: f0 ff 0b lock decl (%rbx) - e24: 79 08 jns e2e - - the unlocking fastpath is equally tight: - - 0000000000000bc0 : - bc8: f0 ff 07 lock incl (%rdi) - bcb: 7f 0a jg bd7 - - -(ii) midpath: aka optimistic spinning, tries to spin for acquisition - while the lock owner is running and there are no other tasks ready - to run that have higher priority (need_resched). The rationale is - that if the lock owner is running, it is likely to release the lock - soon. The mutex spinners are queued up using MCS lock so that only - one spinner can compete for the mutex. - - The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spinlock - with the desirable properties of being fair and with each cpu trying - to acquire the lock spinning on a local variable. It avoids expensive - cacheline bouncing that common test-and-set spinlock implementations - incur. An MCS-like lock is specially tailored for optimistic spinning - for sleeping lock implementation. An important feature of the customized - MCS lock is that it has the extra property that spinners are able to exit - the MCS spinlock queue when they need to reschedule. This further helps - avoid situations where MCS spinners that need to reschedule would continue - waiting to spin on mutex owner, only to go directly to slowpath upon - obtaining the MCS lock. - - -(iii) slowpath: last resort, if the lock is still unable to be acquired, - the task is added to the wait-queue and sleeps until woken up by the - unlock path. Under normal circumstances it blocks as TASK_UNINTERRUPTIBLE. - -While formally kernel mutexes are sleepable locks, it is path (ii) that -makes them more practically a hybrid type. By simply not interrupting a -task and busy-waiting for a few cycles instead of immediately sleeping, -the performance of this lock has been seen to significantly improve a -number of workloads. Note that this technique is also used for rw-semaphores. - -Semantics ---------- - -The mutex subsystem checks and enforces the following rules: - - - Only one task can hold the mutex at a time. - - Only the owner can unlock the mutex. - - Multiple unlocks are not permitted. - - Recursive locking/unlocking is not permitted. - - A mutex must only be initialized via the API (see below). - - A task may not exit with a mutex held. - - Memory areas where held locks reside must not be freed. - - Held mutexes must not be reinitialized. 
- - Mutexes may not be used in hardware or software interrupt - contexts such as tasklets and timers. - -These semantics are fully enforced when CONFIG DEBUG_MUTEXES is enabled. -In addition, the mutex debugging code also implements a number of other -features that make lock debugging easier and faster: - - - Uses symbolic names of mutexes, whenever they are printed - in debug output. - - Point-of-acquire tracking, symbolic lookup of function names, - list of all locks held in the system, printout of them. - - Owner tracking. - - Detects self-recursing locks and prints out all relevant info. - - Detects multi-task circular deadlocks and prints out all affected - locks and tasks (and only those tasks). - - -Interfaces ----------- -Statically define the mutex: - DEFINE_MUTEX(name); - -Dynamically initialize the mutex: - mutex_init(mutex); - -Acquire the mutex, uninterruptible: - void mutex_lock(struct mutex *lock); - void mutex_lock_nested(struct mutex *lock, unsigned int subclass); - int mutex_trylock(struct mutex *lock); - -Acquire the mutex, interruptible: - int mutex_lock_interruptible_nested(struct mutex *lock, - unsigned int subclass); - int mutex_lock_interruptible(struct mutex *lock); - -Acquire the mutex, interruptible, if dec to 0: - int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); - -Unlock the mutex: - void mutex_unlock(struct mutex *lock); - -Test if the mutex is taken: - int mutex_is_locked(struct mutex *lock); - -Disadvantages -------------- - -Unlike its original design and purpose, 'struct mutex' is larger than -most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice -as large as 'struct semaphore' (24 bytes) and 8 bytes shy of the -'struct rw_semaphore' variant. Larger structure sizes mean more CPU -cache and memory footprint. - -When to use mutexes -------------------- - -Unless the strict semantics of mutexes are unsuitable and/or the critical -region prevents the lock from being shared, always prefer them to any other -locking primitive. diff --git a/Documentation/rt-mutex-design.txt b/Documentation/rt-mutex-design.txt deleted file mode 100644 index 8666070d3189..000000000000 --- a/Documentation/rt-mutex-design.txt +++ /dev/null @@ -1,781 +0,0 @@ -# -# Copyright (c) 2006 Steven Rostedt -# Licensed under the GNU Free Documentation License, Version 1.2 -# - -RT-mutex implementation design ------------------------------- - -This document tries to describe the design of the rtmutex.c implementation. -It doesn't describe the reasons why rtmutex.c exists. For that please see -Documentation/rt-mutex.txt. Although this document does explain problems -that happen without this code, but that is in the concept to understand -what the code actually is doing. - -The goal of this document is to help others understand the priority -inheritance (PI) algorithm that is used, as well as reasons for the -decisions that were made to implement PI in the manner that was done. - - -Unbounded Priority Inversion ----------------------------- - -Priority inversion is when a lower priority process executes while a higher -priority process wants to run. This happens for several reasons, and -most of the time it can't be helped. Anytime a high priority process wants -to use a resource that a lower priority process has (a mutex for example), -the high priority process must wait until the lower priority process is done -with the resource. This is a priority inversion. What we want to prevent -is something called unbounded priority inversion. 
That is when the high -priority process is prevented from running by a lower priority process for -an undetermined amount of time. - -The classic example of unbounded priority inversion is where you have three -processes, let's call them processes A, B, and C, where A is the highest -priority process, C is the lowest, and B is in between. A tries to grab a lock -that C owns and must wait and lets C run to release the lock. But in the -meantime, B executes, and since B is of a higher priority than C, it preempts C, -but by doing so, it is in fact preempting A which is a higher priority process. -Now there's no way of knowing how long A will be sleeping waiting for C -to release the lock, because for all we know, B is a CPU hog and will -never give C a chance to release the lock. This is called unbounded priority -inversion. - -Here's a little ASCII art to show the problem. - - grab lock L1 (owned by C) - | -A ---+ - C preempted by B - | -C +----+ - -B +--------> - B now keeps A from running. - - -Priority Inheritance (PI) -------------------------- - -There are several ways to solve this issue, but other ways are out of scope -for this document. Here we only discuss PI. - -PI is where a process inherits the priority of another process if the other -process blocks on a lock owned by the current process. To make this easier -to understand, let's use the previous example, with processes A, B, and C again. - -This time, when A blocks on the lock owned by C, C would inherit the priority -of A. So now if B becomes runnable, it would not preempt C, since C now has -the high priority of A. As soon as C releases the lock, it loses its -inherited priority, and A then can continue with the resource that C had. - -Terminology ------------ - -Here I explain some terminology that is used in this document to help describe -the design that is used to implement PI. - -PI chain - The PI chain is an ordered series of locks and processes that cause - processes to inherit priorities from a previous process that is - blocked on one of its locks. This is described in more detail - later in this document. - -mutex - In this document, to differentiate from locks that implement - PI and spin locks that are used in the PI code, from now on - the PI locks will be called a mutex. - -lock - In this document from now on, I will use the term lock when - referring to spin locks that are used to protect parts of the PI - algorithm. These locks disable preemption for UP (when - CONFIG_PREEMPT is enabled) and on SMP prevents multiple CPUs from - entering critical sections simultaneously. - -spin lock - Same as lock above. - -waiter - A waiter is a struct that is stored on the stack of a blocked - process. Since the scope of the waiter is within the code for - a process being blocked on the mutex, it is fine to allocate - the waiter on the process's stack (local variable). This - structure holds a pointer to the task, as well as the mutex that - the task is blocked on. It also has the plist node structures to - place the task in the waiter_list of a mutex as well as the - pi_list of a mutex owner task (described below). - - waiter is sometimes used in reference to the task that is waiting - on a mutex. This is the same as waiter->task. - -waiters - A list of processes that are blocked on a mutex. - -top waiter - The highest priority process waiting on a specific mutex. - -top pi waiter - The highest priority process waiting on one of the mutexes - that a specific process owns. 
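To make the "waiter" terminology above concrete, a structure along these
lines holds everything needed: the blocked task, the mutex it is blocked on,
and the two plist nodes used for the mutex's wait list and the owner's PI
list. This is only an illustrative sketch; the field names are simplified
and the authoritative definition lives in the rtmutex implementation.

    struct rt_mutex_waiter_sketch {            /* illustrative only */
            struct plist_node list_entry;      /* node on the mutex's wait list */
            struct plist_node pi_list_entry;   /* node on the owner's PI list */
            struct task_struct *task;          /* the blocked task (waiter->task) */
            struct rt_mutex *lock;             /* the mutex the task is blocked on */
    };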
- -Note: task and process are used interchangeably in this document, mostly to - differentiate between two processes that are being described together. - - -PI chain --------- - -The PI chain is a list of processes and mutexes that may cause priority -inheritance to take place. Multiple chains may converge, but a chain -would never diverge, since a process can't be blocked on more than one -mutex at a time. - -Example: - - Process: A, B, C, D, E - Mutexes: L1, L2, L3, L4 - - A owns: L1 - B blocked on L1 - B owns L2 - C blocked on L2 - C owns L3 - D blocked on L3 - D owns L4 - E blocked on L4 - -The chain would be: - - E->L4->D->L3->C->L2->B->L1->A - -To show where two chains merge, we could add another process F and -another mutex L5 where B owns L5 and F is blocked on mutex L5. - -The chain for F would be: - - F->L5->B->L1->A - -Since a process may own more than one mutex, but never be blocked on more than -one, the chains merge. - -Here we show both chains: - - E->L4->D->L3->C->L2-+ - | - +->B->L1->A - | - F->L5-+ - -For PI to work, the processes at the right end of these chains (or we may -also call it the Top of the chain) must be equal to or higher in priority -than the processes to the left or below in the chain. - -Also since a mutex may have more than one process blocked on it, we can -have multiple chains merge at mutexes. If we add another process G that is -blocked on mutex L2: - - G->L2->B->L1->A - -And once again, to show how this can grow I will show the merging chains -again. - - E->L4->D->L3->C-+ - +->L2-+ - | | - G-+ +->B->L1->A - | - F->L5-+ - - -Plist ------ - -Before I go further and talk about how the PI chain is stored through lists -on both mutexes and processes, I'll explain the plist. This is similar to -the struct list_head functionality that is already in the kernel. -The implementation of plist is out of scope for this document, but it is -very important to understand what it does. - -There are a few differences between plist and list, the most important one -being that plist is a priority sorted linked list. This means that the -priorities of the plist are sorted, such that it takes O(1) to retrieve the -highest priority item in the list. Obviously this is useful to store processes -based on their priorities. - -Another difference, which is important for implementation, is that, unlike -list, the head of the list is a different element than the nodes of a list. -So the head of the list is declared as struct plist_head and nodes that will -be added to the list are declared as struct plist_node. - - -Mutex Waiter List ------------------ - -Every mutex keeps track of all the waiters that are blocked on itself. The mutex -has a plist to store these waiters by priority. This list is protected by -a spin lock that is located in the struct of the mutex. This lock is called -wait_lock. Since the modification of the waiter list is never done in -interrupt context, the wait_lock can be taken without disabling interrupts. - - -Task PI List ------------- - -To keep track of the PI chains, each process has its own PI list. This is -a list of all top waiters of the mutexes that are owned by the process. -Note that this list only holds the top waiters and not all waiters that are -blocked on mutexes owned by the process. - -The top of the task's PI list is always the highest priority task that -is waiting on a mutex that is owned by the task. So if the task has -inherited a priority, it will always be the priority of the task that is -at the top of this list. 
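Expressed as code, the inherited priority therefore falls out of a single
comparison against the head of this list. The sketch below is illustrative
only: the field and helper names are simplified assumptions, and remember
that a numerically lower ->prio means a higher priority. The real comparison
is done by rt_mutex_getprio, described later in this document.

    /* Illustrative sketch with simplified names: the effective priority is
     * the higher of the task's own priority and that of its top PI waiter.
     * Lower ->prio values mean higher priority, hence min(). */
    static int effective_prio(struct task_struct *task)
    {
            int prio = task->normal_prio;

            if (!plist_head_empty(&task->pi_list))   /* simplified field name */
                    prio = min(prio, plist_first(&task->pi_list)->prio);

            return prio;
    }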
- -This list is stored in the task structure of a process as a plist called -pi_list. This list is protected by a spin lock also in the task structure, -called pi_lock. This lock may also be taken in interrupt context, so when -locking the pi_lock, interrupts must be disabled. - - -Depth of the PI Chain ---------------------- - -The maximum depth of the PI chain is not dynamic, and could actually be -defined. But is very complex to figure it out, since it depends on all -the nesting of mutexes. Let's look at the example where we have 3 mutexes, -L1, L2, and L3, and four separate functions func1, func2, func3 and func4. -The following shows a locking order of L1->L2->L3, but may not actually -be directly nested that way. - -void func1(void) -{ - mutex_lock(L1); - - /* do anything */ - - mutex_unlock(L1); -} - -void func2(void) -{ - mutex_lock(L1); - mutex_lock(L2); - - /* do something */ - - mutex_unlock(L2); - mutex_unlock(L1); -} - -void func3(void) -{ - mutex_lock(L2); - mutex_lock(L3); - - /* do something else */ - - mutex_unlock(L3); - mutex_unlock(L2); -} - -void func4(void) -{ - mutex_lock(L3); - - /* do something again */ - - mutex_unlock(L3); -} - -Now we add 4 processes that run each of these functions separately. -Processes A, B, C, and D which run functions func1, func2, func3 and func4 -respectively, and such that D runs first and A last. With D being preempted -in func4 in the "do something again" area, we have a locking that follows: - -D owns L3 - C blocked on L3 - C owns L2 - B blocked on L2 - B owns L1 - A blocked on L1 - -And thus we have the chain A->L1->B->L2->C->L3->D. - -This gives us a PI depth of 4 (four processes), but looking at any of the -functions individually, it seems as though they only have at most a locking -depth of two. So, although the locking depth is defined at compile time, -it still is very difficult to find the possibilities of that depth. - -Now since mutexes can be defined by user-land applications, we don't want a DOS -type of application that nests large amounts of mutexes to create a large -PI chain, and have the code holding spin locks while looking at a large -amount of data. So to prevent this, the implementation not only implements -a maximum lock depth, but also only holds at most two different locks at a -time, as it walks the PI chain. More about this below. - - -Mutex owner and flags ---------------------- - -The mutex structure contains a pointer to the owner of the mutex. If the -mutex is not owned, this owner is set to NULL. Since all architectures -have the task structure on at least a four byte alignment (and if this is -not true, the rtmutex.c code will be broken!), this allows for the two -least significant bits to be used as flags. This part is also described -in Documentation/rt-mutex.txt, but will also be briefly described here. - -Bit 0 is used as the "Pending Owner" flag. This is described later. -Bit 1 is used as the "Has Waiters" flags. This is also described later - in more detail, but is set whenever there are waiters on a mutex. - - -cmpxchg Tricks --------------- - -Some architectures implement an atomic cmpxchg (Compare and Exchange). This -is used (when applicable) to keep the fast path of grabbing and releasing -mutexes short. 
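As a concrete illustration of that fast path, acquiring an uncontended
rt-mutex can then boil down to a single atomic compare-and-exchange on the
owner field. This is a simplified sketch only; the real code wraps this in
the rt_mutex_cmpxchg macro discussed below and also deals with the flag
bits stored in the owner pointer.

    /* Simplified fast-path sketch: take the lock only if nobody owns it.
     * The flag bits in the owner field are ignored here for clarity. */
    if (cmpxchg(&lock->owner, NULL, current) == NULL)
            return;         /* got the lock without touching wait_lock */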
- -cmpxchg is basically the following function performed atomically: - -unsigned long _cmpxchg(unsigned long *A, unsigned long *B, unsigned long *C) -{ - unsigned long T = *A; - if (*A == *B) { - *A = *C; - } - return T; -} -#define cmpxchg(a,b,c) _cmpxchg(&a,&b,&c) - -This is really nice to have, since it allows you to only update a variable -if the variable is what you expect it to be. You know if it succeeded if -the return value (the old value of A) is equal to B. - -The macro rt_mutex_cmpxchg is used to try to lock and unlock mutexes. If -the architecture does not support CMPXCHG, then this macro is simply set -to fail every time. But if CMPXCHG is supported, then this will -help out extremely to keep the fast path short. - -The use of rt_mutex_cmpxchg with the flags in the owner field help optimize -the system for architectures that support it. This will also be explained -later in this document. - - -Priority adjustments --------------------- - -The implementation of the PI code in rtmutex.c has several places that a -process must adjust its priority. With the help of the pi_list of a -process this is rather easy to know what needs to be adjusted. - -The functions implementing the task adjustments are rt_mutex_adjust_prio, -__rt_mutex_adjust_prio (same as the former, but expects the task pi_lock -to already be taken), rt_mutex_getprio, and rt_mutex_setprio. - -rt_mutex_getprio and rt_mutex_setprio are only used in __rt_mutex_adjust_prio. - -rt_mutex_getprio returns the priority that the task should have. Either the -task's own normal priority, or if a process of a higher priority is waiting on -a mutex owned by the task, then that higher priority should be returned. -Since the pi_list of a task holds an order by priority list of all the top -waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs -to compare the top pi waiter to its own normal priority, and return the higher -priority back. - -(Note: if looking at the code, you will notice that the lower number of - prio is returned. This is because the prio field in the task structure - is an inverse order of the actual priority. So a "prio" of 5 is - of higher priority than a "prio" of 10.) - -__rt_mutex_adjust_prio examines the result of rt_mutex_getprio, and if the -result does not equal the task's current priority, then rt_mutex_setprio -is called to adjust the priority of the task to the new priority. -Note that rt_mutex_setprio is defined in kernel/sched/core.c to implement the -actual change in priority. - -It is interesting to note that __rt_mutex_adjust_prio can either increase -or decrease the priority of the task. In the case that a higher priority -process has just blocked on a mutex owned by the task, __rt_mutex_adjust_prio -would increase/boost the task's priority. But if a higher priority task -were for some reason to leave the mutex (timeout or signal), this same function -would decrease/unboost the priority of the task. That is because the pi_list -always contains the highest priority task that is waiting on a mutex owned -by the task, so we only need to compare the priority of that top pi waiter -to the normal priority of the given task. - - -High level overview of the PI chain walk ----------------------------------------- - -The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain. - -The implementation has gone through several iterations, and has ended up -with what we believe is the best. 
It walks the PI chain by only grabbing -at most two locks at a time, and is very efficient. - -The rt_mutex_adjust_prio_chain can be used either to boost or lower process -priorities. - -rt_mutex_adjust_prio_chain is called with a task to be checked for PI -(de)boosting (the owner of a mutex that a process is blocking on), a flag to -check for deadlocking, the mutex that the task owns, and a pointer to a waiter -that is the process's waiter struct that is blocked on the mutex (although this -parameter may be NULL for deboosting). - -For this explanation, I will not mention deadlock detection. This explanation -will try to stay at a high level. - -When this function is called, there are no locks held. That also means -that the state of the owner and lock can change when entered into this function. - -Before this function is called, the task has already had rt_mutex_adjust_prio -performed on it. This means that the task is set to the priority that it -should be at, but the plist nodes of the task's waiter have not been updated -with the new priorities, and that this task may not be in the proper locations -in the pi_lists and wait_lists that the task is blocked on. This function -solves all that. - -A loop is entered, where task is the owner to be checked for PI changes that -was passed by parameter (for the first iteration). The pi_lock of this task is -taken to prevent any more changes to the pi_list of the task. This also -prevents new tasks from completing the blocking on a mutex that is owned by this -task. - -If the task is not blocked on a mutex then the loop is exited. We are at -the top of the PI chain. - -A check is now done to see if the original waiter (the process that is blocked -on the current mutex) is the top pi waiter of the task. That is, is this -waiter on the top of the task's pi_list. If it is not, it either means that -there is another process higher in priority that is blocked on one of the -mutexes that the task owns, or that the waiter has just woken up via a signal -or timeout and has left the PI chain. In either case, the loop is exited, since -we don't need to do any more changes to the priority of the current task, or any -task that owns a mutex that this current task is waiting on. A priority chain -walk is only needed when a new top pi waiter is made to a task. - -The next check sees if the task's waiter plist node has the priority equal to -the priority the task is set at. If they are equal, then we are done with -the loop. Remember that the function started with the priority of the -task adjusted, but the plist nodes that hold the task in other processes -pi_lists have not been adjusted. - -Next, we look at the mutex that the task is blocked on. The mutex's wait_lock -is taken. This is done by a spin_trylock, because the locking order of the -pi_lock and wait_lock goes in the opposite direction. If we fail to grab the -lock, the pi_lock is released, and we restart the loop. - -Now that we have both the pi_lock of the task as well as the wait_lock of -the mutex the task is blocked on, we update the task's waiter's plist node -that is located on the mutex's wait_list. - -Now we release the pi_lock of the task. - -Next the owner of the mutex has its pi_lock taken, so we can update the -task's entry in the owner's pi_list. If the task is the highest priority -process on the mutex's wait_list, then we remove the previous top waiter -from the owner's pi_list, and replace it with the task. 
- -Note: It is possible that the task was the current top waiter on the mutex, - in which case the task is not yet on the pi_list of the waiter. This - is OK, since plist_del does nothing if the plist node is not on any - list. - -If the task was not the top waiter of the mutex, but it was before we -did the priority updates, that means we are deboosting/lowering the -task. In this case, the task is removed from the pi_list of the owner, -and the new top waiter is added. - -Lastly, we unlock both the pi_lock of the task, as well as the mutex's -wait_lock, and continue the loop again. On the next iteration of the -loop, the previous owner of the mutex will be the task that will be -processed. - -Note: One might think that the owner of this mutex might have changed - since we just grab the mutex's wait_lock. And one could be right. - The important thing to remember is that the owner could not have - become the task that is being processed in the PI chain, since - we have taken that task's pi_lock at the beginning of the loop. - So as long as there is an owner of this mutex that is not the same - process as the tasked being worked on, we are OK. - - Looking closely at the code, one might be confused. The check for the - end of the PI chain is when the task isn't blocked on anything or the - task's waiter structure "task" element is NULL. This check is - protected only by the task's pi_lock. But the code to unlock the mutex - sets the task's waiter structure "task" element to NULL with only - the protection of the mutex's wait_lock, which was not taken yet. - Isn't this a race condition if the task becomes the new owner? - - The answer is No! The trick is the spin_trylock of the mutex's - wait_lock. If we fail that lock, we release the pi_lock of the - task and continue the loop, doing the end of PI chain check again. - - In the code to release the lock, the wait_lock of the mutex is held - the entire time, and it is not let go when we grab the pi_lock of the - new owner of the mutex. So if the switch of a new owner were to happen - after the check for end of the PI chain and the grabbing of the - wait_lock, the unlocking code would spin on the new owner's pi_lock - but never give up the wait_lock. So the PI chain loop is guaranteed to - fail the spin_trylock on the wait_lock, release the pi_lock, and - try again. - - If you don't quite understand the above, that's OK. You don't have to, - unless you really want to make a proof out of it ;) - - -Pending Owners and Lock stealing --------------------------------- - -One of the flags in the owner field of the mutex structure is "Pending Owner". -What this means is that an owner was chosen by the process releasing the -mutex, but that owner has yet to wake up and actually take the mutex. - -Why is this important? Why can't we just give the mutex to another process -and be done with it? - -The PI code is to help with real-time processes, and to let the highest -priority process run as long as possible with little latencies and delays. -If a high priority process owns a mutex that a lower priority process is -blocked on, when the mutex is released it would be given to the lower priority -process. What if the higher priority process wants to take that mutex again. -The high priority process would fail to take that mutex that it just gave up -and it would need to boost the lower priority process to run with full -latency of that critical section (since the low priority process just entered -it). 
- -There's no reason a high priority process that gives up a mutex should be -penalized if it tries to take that mutex again. If the new owner of the -mutex has not woken up yet, there's no reason that the higher priority process -could not take that mutex away. - -To solve this, we introduced Pending Ownership and Lock Stealing. When a -new process is given a mutex that it was blocked on, it is only given -pending ownership. This means that it's the new owner, unless a higher -priority process comes in and tries to grab that mutex. If a higher priority -process does come along and wants that mutex, we let the higher priority -process "steal" the mutex from the pending owner (only if it is still pending) -and continue with the mutex. - - -Taking of a mutex (The walk through) ------------------------------------- - -OK, now let's take a look at the detailed walk through of what happens when -taking a mutex. - -The first thing that is tried is the fast taking of the mutex. This is -done when we have CMPXCHG enabled (otherwise the fast taking automatically -fails). Only when the owner field of the mutex is NULL can the lock be -taken with the CMPXCHG and nothing else needs to be done. - -If there is contention on the lock, whether it is owned or pending owner -we go about the slow path (rt_mutex_slowlock). - -The slow path function is where the task's waiter structure is created on -the stack. This is because the waiter structure is only needed for the -scope of this function. The waiter structure holds the nodes to store -the task on the wait_list of the mutex, and if need be, the pi_list of -the owner. - -The wait_lock of the mutex is taken since the slow path of unlocking the -mutex also takes this lock. - -We then call try_to_take_rt_mutex. This is where the architecture that -does not implement CMPXCHG would always grab the lock (if there's no -contention). - -try_to_take_rt_mutex is used every time the task tries to grab a mutex in the -slow path. The first thing that is done here is an atomic setting of -the "Has Waiters" flag of the mutex's owner field. Yes, this could really -be false, because if the mutex has no owner, there are no waiters and -the current task also won't have any waiters. But we don't have the lock -yet, so we assume we are going to be a waiter. The reason for this is to -play nice for those architectures that do have CMPXCHG. By setting this flag -now, the owner of the mutex can't release the mutex without going into the -slow unlock path, and it would then need to grab the wait_lock, which this -code currently holds. So setting the "Has Waiters" flag forces the owner -to synchronize with this code. - -Now that we know that we can't have any races with the owner releasing the -mutex, we check to see if we can take the ownership. This is done if the -mutex doesn't have a owner, or if we can steal the mutex from a pending -owner. Let's look at the situations we have here. - - 1) Has owner that is pending - ---------------------------- - - The mutex has a owner, but it hasn't woken up and the mutex flag - "Pending Owner" is set. The first check is to see if the owner isn't the - current task. This is because this function is also used for the pending - owner to grab the mutex. When a pending owner wakes up, it checks to see - if it can take the mutex, and this is done if the owner is already set to - itself. If so, we succeed and leave the function, clearing the "Pending - Owner" bit. 
- - If the pending owner is not current, we check to see if the current priority is - higher than the pending owner. If not, we fail the function and return. - - There's also something special about a pending owner. That is a pending owner - is never blocked on a mutex. So there is no PI chain to worry about. It also - means that if the mutex doesn't have any waiters, there's no accounting needed - to update the pending owner's pi_list, since we only worry about processes - blocked on the current mutex. - - If there are waiters on this mutex, and we just stole the ownership, we need - to take the top waiter, remove it from the pi_list of the pending owner, and - add it to the current pi_list. Note that at this moment, the pending owner - is no longer on the list of waiters. This is fine, since the pending owner - would add itself back when it realizes that it had the ownership stolen - from itself. When the pending owner tries to grab the mutex, it will fail - in try_to_take_rt_mutex if the owner field points to another process. - - 2) No owner - ----------- - - If there is no owner (or we successfully stole the lock), we set the owner - of the mutex to current, and set the flag of "Has Waiters" if the current - mutex actually has waiters, or we clear the flag if it doesn't. See, it was - OK that we set that flag early, since now it is cleared. - - 3) Failed to grab ownership - --------------------------- - - The most interesting case is when we fail to take ownership. This means that - there exists an owner, or there's a pending owner with equal or higher - priority than the current task. - -We'll continue on the failed case. - -If the mutex has a timeout, we set up a timer to go off to break us out -of this mutex if we failed to get it after a specified amount of time. - -Now we enter a loop that will continue to try to take ownership of the mutex, or -fail from a timeout or signal. - -Once again we try to take the mutex. This will usually fail the first time -in the loop, since it had just failed to get the mutex. But the second time -in the loop, this would likely succeed, since the task would likely be -the pending owner. - -If the mutex is TASK_INTERRUPTIBLE a check for signals and timeout is done -here. - -The waiter structure has a "task" field that points to the task that is blocked -on the mutex. This field can be NULL the first time it goes through the loop -or if the task is a pending owner and had its mutex stolen. If the "task" -field is NULL then we need to set up the accounting for it. - -Task blocks on mutex --------------------- - -The accounting of a mutex and process is done with the waiter structure of -the process. The "task" field is set to the process, and the "lock" field -to the mutex. The plist nodes are initialized to the processes current -priority. - -Since the wait_lock was taken at the entry of the slow lock, we can safely -add the waiter to the wait_list. If the current process is the highest -priority process currently waiting on this mutex, then we remove the -previous top waiter process (if it exists) from the pi_list of the owner, -and add the current process to that list. Since the pi_list of the owner -has changed, we call rt_mutex_adjust_prio on the owner to see if the owner -should adjust its priority accordingly. - -If the owner is also blocked on a lock, and had its pi_list changed -(or deadlock checking is on), we unlock the wait_lock of the mutex and go ahead -and run rt_mutex_adjust_prio_chain on the owner, as described earlier. 
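Pulling the pieces described so far together, the slow lock path has roughly
the shape sketched below. This is a heavily condensed illustration: the
function names follow the description in this document, but their signatures
and the error handling are simplified assumptions, and the deadlock
detection, timeout handling and waiter cleanup details are omitted.

    /* Condensed, illustrative skeleton of the slow lock path. */
    static int rt_mutex_slowlock_sketch(struct rt_mutex *lock, int state)
    {
            struct rt_mutex_waiter waiter = { .task = NULL };
            int ret = 0;

            spin_lock(&lock->wait_lock);

            while (!try_to_take_rt_mutex(lock)) {   /* also handles lock stealing */
                    if (state == TASK_INTERRUPTIBLE && signal_pending(current)) {
                            ret = -EINTR;   /* real code also removes the waiter again */
                            break;
                    }
                    if (!waiter.task)       /* first pass, or ownership was stolen */
                            task_blocks_on_rt_mutex(lock, &waiter); /* wait_list/pi_list accounting */

                    spin_unlock(&lock->wait_lock);
                    set_current_state(state);
                    schedule();             /* sleep until woken by the unlock path */
                    spin_lock(&lock->wait_lock);
            }

            spin_unlock(&lock->wait_lock);
            return ret;
    }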
- -Now all locks are released, and if the current process is still blocked on a -mutex (waiter "task" field is not NULL), then we go to sleep (call schedule). - -Waking up in the loop ---------------------- - -The schedule can then wake up for a few reasons. - 1) we were given pending ownership of the mutex. - 2) we received a signal and was TASK_INTERRUPTIBLE - 3) we had a timeout and was TASK_INTERRUPTIBLE - -In any of these cases, we continue the loop and once again try to grab the -ownership of the mutex. If we succeed, we exit the loop, otherwise we continue -and on signal and timeout, will exit the loop, or if we had the mutex stolen -we just simply add ourselves back on the lists and go back to sleep. - -Note: For various reasons, because of timeout and signals, the steal mutex - algorithm needs to be careful. This is because the current process is - still on the wait_list. And because of dynamic changing of priorities, - especially on SCHED_OTHER tasks, the current process can be the - highest priority task on the wait_list. - -Failed to get mutex on Timeout or Signal ----------------------------------------- - -If a timeout or signal occurred, the waiter's "task" field would not be -NULL and the task needs to be taken off the wait_list of the mutex and perhaps -pi_list of the owner. If this process was a high priority process, then -the rt_mutex_adjust_prio_chain needs to be executed again on the owner, -but this time it will be lowering the priorities. - - -Unlocking the Mutex -------------------- - -The unlocking of a mutex also has a fast path for those architectures with -CMPXCHG. Since the taking of a mutex on contention always sets the -"Has Waiters" flag of the mutex's owner, we use this to know if we need to -take the slow path when unlocking the mutex. If the mutex doesn't have any -waiters, the owner field of the mutex would equal the current process and -the mutex can be unlocked by just replacing the owner field with NULL. - -If the owner field has the "Has Waiters" bit set (or CMPXCHG is not available), -the slow unlock path is taken. - -The first thing done in the slow unlock path is to take the wait_lock of the -mutex. This synchronizes the locking and unlocking of the mutex. - -A check is made to see if the mutex has waiters or not. On architectures that -do not have CMPXCHG, this is the location that the owner of the mutex will -determine if a waiter needs to be awoken or not. On architectures that -do have CMPXCHG, that check is done in the fast path, but it is still needed -in the slow path too. If a waiter of a mutex woke up because of a signal -or timeout between the time the owner failed the fast path CMPXCHG check and -the grabbing of the wait_lock, the mutex may not have any waiters, thus the -owner still needs to make this check. If there are no waiters then the mutex -owner field is set to NULL, the wait_lock is released and nothing more is -needed. - -If there are waiters, then we need to wake one up and give that waiter -pending ownership. - -On the wake up code, the pi_lock of the current owner is taken. The top -waiter of the lock is found and removed from the wait_list of the mutex -as well as the pi_list of the current owner. The task field of the new -pending owner's waiter structure is set to NULL, and the owner field of the -mutex is set to the new owner with the "Pending Owner" bit set, as well -as the "Has Waiters" bit if there still are other processes blocked on the -mutex. 
- -The pi_lock of the previous owner is released, and the new pending owner's -pi_lock is taken. Remember that this is the trick to prevent the race -condition in rt_mutex_adjust_prio_chain from adding itself as a waiter -on the mutex. - -We now clear the "pi_blocked_on" field of the new pending owner, and if -the mutex still has waiters pending, we add the new top waiter to the pi_list -of the pending owner. - -Finally we unlock the pi_lock of the pending owner and wake it up. - - -Contact -------- - -For updates on this document, please email Steven Rostedt - - -Credits -------- - -Author: Steven Rostedt - -Reviewers: Ingo Molnar, Thomas Gleixner, Thomas Duetsch, and Randy Dunlap - -Updates -------- - -This document was originally written for 2.6.17-rc3-mm1 diff --git a/Documentation/rt-mutex.txt b/Documentation/rt-mutex.txt deleted file mode 100644 index 243393d882ee..000000000000 --- a/Documentation/rt-mutex.txt +++ /dev/null @@ -1,79 +0,0 @@ -RT-mutex subsystem with PI support ----------------------------------- - -RT-mutexes with priority inheritance are used to support PI-futexes, -which enable pthread_mutex_t priority inheritance attributes -(PTHREAD_PRIO_INHERIT). [See Documentation/pi-futex.txt for more details -about PI-futexes.] - -This technology was developed in the -rt tree and streamlined for -pthread_mutex support. - -Basic principles: ------------------ - -RT-mutexes extend the semantics of simple mutexes by the priority -inheritance protocol. - -A low priority owner of a rt-mutex inherits the priority of a higher -priority waiter until the rt-mutex is released. If the temporarily -boosted owner blocks on a rt-mutex itself it propagates the priority -boosting to the owner of the other rt_mutex it gets blocked on. The -priority boosting is immediately removed once the rt_mutex has been -unlocked. - -This approach allows us to shorten the block of high-prio tasks on -mutexes which protect shared resources. Priority inheritance is not a -magic bullet for poorly designed applications, but it allows -well-designed applications to use userspace locks in critical parts of -an high priority thread, without losing determinism. - -The enqueueing of the waiters into the rtmutex waiter list is done in -priority order. For same priorities FIFO order is chosen. For each -rtmutex, only the top priority waiter is enqueued into the owner's -priority waiters list. This list too queues in priority order. Whenever -the top priority waiter of a task changes (for example it timed out or -got a signal), the priority of the owner task is readjusted. [The -priority enqueueing is handled by "plists", see include/linux/plist.h -for more details.] - -RT-mutexes are optimized for fastpath operations and have no internal -locking overhead when locking an uncontended mutex or unlocking a mutex -without waiters. The optimized fastpath operations require cmpxchg -support. [If that is not available then the rt-mutex internal spinlock -is used] - -The state of the rt-mutex is tracked via the owner field of the rt-mutex -structure: - -rt_mutex->owner holds the task_struct pointer of the owner. Bit 0 and 1 -are used to keep track of the "owner is pending" and "rtmutex has -waiters" state. 
- - owner bit1 bit0 - NULL 0 0 mutex is free (fast acquire possible) - NULL 0 1 invalid state - NULL 1 0 Transitional state* - NULL 1 1 invalid state - taskpointer 0 0 mutex is held (fast release possible) - taskpointer 0 1 task is pending owner - taskpointer 1 0 mutex is held and has waiters - taskpointer 1 1 task is pending owner and mutex has waiters - -Pending-ownership handling is a performance optimization: -pending-ownership is assigned to the first (highest priority) waiter of -the mutex, when the mutex is released. The thread is woken up and once -it starts executing it can acquire the mutex. Until the mutex is taken -by it (bit 0 is cleared) a competing higher priority thread can "steal" -the mutex which puts the woken up thread back on the waiters list. - -The pending-ownership optimization is especially important for the -uninterrupted workflow of high-prio tasks which repeatedly -takes/releases locks that have lower-prio waiters. Without this -optimization the higher-prio thread would ping-pong to the lower-prio -task [because at unlock time we always assign a new owner]. - -(*) The "mutex has waiters" bit gets set to take the lock. If the lock -doesn't already have an owner, this bit is quickly cleared if there are -no waiters. So this is a transitional state to synchronize with looking -at the owner field of the mutex and the mutex owner releasing the lock. diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt deleted file mode 100644 index 97eaf5727178..000000000000 --- a/Documentation/spinlocks.txt +++ /dev/null @@ -1,167 +0,0 @@ -Lesson 1: Spin locks - -The most basic primitive for locking is spinlock. - -static DEFINE_SPINLOCK(xxx_lock); - - unsigned long flags; - - spin_lock_irqsave(&xxx_lock, flags); - ... critical section here .. - spin_unlock_irqrestore(&xxx_lock, flags); - -The above is always safe. It will disable interrupts _locally_, but the -spinlock itself will guarantee the global lock, so it will guarantee that -there is only one thread-of-control within the region(s) protected by that -lock. This works well even under UP also, so the code does _not_ need to -worry about UP vs SMP issues: the spinlocks work correctly under both. - - NOTE! Implications of spin_locks for memory are further described in: - - Documentation/memory-barriers.txt - (5) LOCK operations. - (6) UNLOCK operations. - -The above is usually pretty simple (you usually need and want only one -spinlock for most things - using more than one spinlock can make things a -lot more complex and even slower and is usually worth it only for -sequences that you _know_ need to be split up: avoid it at all cost if you -aren't sure). - -This is really the only really hard part about spinlocks: once you start -using spinlocks they tend to expand to areas you might not have noticed -before, because you have to make sure the spinlocks correctly protect the -shared data structures _everywhere_ they are used. The spinlocks are most -easily added to places that are completely independent of other code (for -example, internal driver data structures that nobody else ever touches). - - NOTE! The spin-lock is safe only when you _also_ use the lock itself - to do locking across CPU's, which implies that EVERYTHING that - touches a shared variable has to agree about the spinlock they want - to use. - ----- - -Lesson 2: reader-writer spinlocks. 
- -If your data accesses have a very natural pattern where you usually tend -to mostly read from the shared variables, the reader-writer locks -(rw_lock) versions of the spinlocks are sometimes useful. They allow multiple -readers to be in the same critical region at once, but if somebody wants -to change the variables it has to get an exclusive write lock. - - NOTE! reader-writer locks require more atomic memory operations than - simple spinlocks. Unless the reader critical section is long, you - are better off just using spinlocks. - -The routines look the same as above: - - rwlock_t xxx_lock = __RW_LOCK_UNLOCKED(xxx_lock); - - unsigned long flags; - - read_lock_irqsave(&xxx_lock, flags); - .. critical section that only reads the info ... - read_unlock_irqrestore(&xxx_lock, flags); - - write_lock_irqsave(&xxx_lock, flags); - .. read and write exclusive access to the info ... - write_unlock_irqrestore(&xxx_lock, flags); - -The above kind of lock may be useful for complex data structures like -linked lists, especially searching for entries without changing the list -itself. The read lock allows many concurrent readers. Anything that -_changes_ the list will have to get the write lock. - - NOTE! RCU is better for list traversal, but requires careful - attention to design detail (see Documentation/RCU/listRCU.txt). - -Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_ -time need to do any changes (even if you don't do it every time), you have -to get the write-lock at the very beginning. - - NOTE! We are working hard to remove reader-writer spinlocks in most - cases, so please don't add a new one without consensus. (Instead, see - Documentation/RCU/rcu.txt for complete information.) - ----- - -Lesson 3: spinlocks revisited. - -The single spin-lock primitives above are by no means the only ones. They -are the most safe ones, and the ones that work under all circumstances, -but partly _because_ they are safe they are also fairly slow. They are slower -than they'd need to be, because they do have to disable interrupts -(which is just a single instruction on a x86, but it's an expensive one - -and on other architectures it can be worse). - -If you have a case where you have to protect a data structure across -several CPU's and you want to use spinlocks you can potentially use -cheaper versions of the spinlocks. IFF you know that the spinlocks are -never used in interrupt handlers, you can use the non-irq versions: - - spin_lock(&lock); - ... - spin_unlock(&lock); - -(and the equivalent read-write versions too, of course). The spinlock will -guarantee the same kind of exclusive access, and it will be much faster. -This is useful if you know that the data in question is only ever -manipulated from a "process context", ie no interrupts involved. - -The reasons you mustn't use these versions if you have interrupts that -play with the spinlock is that you can get deadlocks: - - spin_lock(&lock); - ... - <- interrupt comes in: - spin_lock(&lock); - -where an interrupt tries to lock an already locked variable. This is ok if -the other interrupt happens on another CPU, but it is _not_ ok if the -interrupt happens on the same CPU that already holds the lock, because the -lock will obviously never be released (because the interrupt is waiting -for the lock, and the lock-holder is interrupted by the interrupt and will -not continue until the interrupt has been processed). 
- -(This is also the reason why the irq-versions of the spinlocks only need -to disable the _local_ interrupts - it's ok to use spinlocks in interrupts -on other CPU's, because an interrupt on another CPU doesn't interrupt the -CPU that holds the lock, so the lock-holder can continue and eventually -releases the lock). - -Note that you can be clever with read-write locks and interrupts. For -example, if you know that the interrupt only ever gets a read-lock, then -you can use a non-irq version of read locks everywhere - because they -don't block on each other (and thus there is no dead-lock wrt interrupts. -But when you do the write-lock, you have to use the irq-safe version. - -For an example of being clever with rw-locks, see the "waitqueue_lock" -handling in kernel/sched/core.c - nothing ever _changes_ a wait-queue from -within an interrupt, they only read the queue in order to know whom to -wake up. So read-locks are safe (which is good: they are very common -indeed), while write-locks need to protect themselves against interrupts. - - Linus - ----- - -Reference information: - -For dynamic initialization, use spin_lock_init() or rwlock_init() as -appropriate: - - spinlock_t xxx_lock; - rwlock_t xxx_rw_lock; - - static int __init xxx_init(void) - { - spin_lock_init(&xxx_lock); - rwlock_init(&xxx_rw_lock); - ... - } - - module_init(xxx_init); - -For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or -__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate. diff --git a/Documentation/ww-mutex-design.txt b/Documentation/ww-mutex-design.txt deleted file mode 100644 index 8a112dc304c3..000000000000 --- a/Documentation/ww-mutex-design.txt +++ /dev/null @@ -1,344 +0,0 @@ -Wait/Wound Deadlock-Proof Mutex Design -====================================== - -Please read mutex-design.txt first, as it applies to wait/wound mutexes too. - -Motivation for WW-Mutexes -------------------------- - -GPU's do operations that commonly involve many buffers. Those buffers -can be shared across contexts/processes, exist in different memory -domains (for example VRAM vs system memory), and so on. And with -PRIME / dmabuf, they can even be shared across devices. So there are -a handful of situations where the driver needs to wait for buffers to -become ready. If you think about this in terms of waiting on a buffer -mutex for it to become available, this presents a problem because -there is no way to guarantee that buffers appear in a execbuf/batch in -the same order in all contexts. That is directly under control of -userspace, and a result of the sequence of GL calls that an application -makes. Which results in the potential for deadlock. The problem gets -more complex when you consider that the kernel may need to migrate the -buffer(s) into VRAM before the GPU operates on the buffer(s), which -may in turn require evicting some other buffers (and you don't want to -evict other buffers which are already queued up to the GPU), but for a -simplified understanding of the problem you can ignore this. - -The algorithm that the TTM graphics subsystem came up with for dealing with -this problem is quite simple. For each group of buffers (execbuf) that need -to be locked, the caller would be assigned a unique reservation id/ticket, -from a global counter. In case of deadlock while locking all the buffers -associated with a execbuf, the one with the lowest reservation ticket (i.e. -the oldest task) wins, and the one with the higher reservation id (i.e. 
the -younger task) unlocks all of the buffers that it has already locked, and then -tries again. - -In the RDBMS literature this deadlock handling approach is called wait/wound: -The older tasks waits until it can acquire the contended lock. The younger tasks -needs to back off and drop all the locks it is currently holding, i.e. the -younger task is wounded. - -Concepts --------- - -Compared to normal mutexes two additional concepts/objects show up in the lock -interface for w/w mutexes: - -Acquire context: To ensure eventual forward progress it is important the a task -trying to acquire locks doesn't grab a new reservation id, but keeps the one it -acquired when starting the lock acquisition. This ticket is stored in the -acquire context. Furthermore the acquire context keeps track of debugging state -to catch w/w mutex interface abuse. - -W/w class: In contrast to normal mutexes the lock class needs to be explicit for -w/w mutexes, since it is required to initialize the acquire context. - -Furthermore there are three different class of w/w lock acquire functions: - -* Normal lock acquisition with a context, using ww_mutex_lock. - -* Slowpath lock acquisition on the contending lock, used by the wounded task - after having dropped all already acquired locks. These functions have the - _slow postfix. - - From a simple semantics point-of-view the _slow functions are not strictly - required, since simply calling the normal ww_mutex_lock functions on the - contending lock (after having dropped all other already acquired locks) will - work correctly. After all if no other ww mutex has been acquired yet there's - no deadlock potential and hence the ww_mutex_lock call will block and not - prematurely return -EDEADLK. The advantage of the _slow functions is in - interface safety: - - ww_mutex_lock has a __must_check int return type, whereas ww_mutex_lock_slow - has a void return type. Note that since ww mutex code needs loops/retries - anyway the __must_check doesn't result in spurious warnings, even though the - very first lock operation can never fail. - - When full debugging is enabled ww_mutex_lock_slow checks that all acquired - ww mutex have been released (preventing deadlocks) and makes sure that we - block on the contending lock (preventing spinning through the -EDEADLK - slowpath until the contended lock can be acquired). - -* Functions to only acquire a single w/w mutex, which results in the exact same - semantics as a normal mutex. This is done by calling ww_mutex_lock with a NULL - context. - - Again this is not strictly required. But often you only want to acquire a - single lock in which case it's pointless to set up an acquire context (and so - better to avoid grabbing a deadlock avoidance ticket). - -Of course, all the usual variants for handling wake-ups due to signals are also -provided. - -Usage ------ - -Three different ways to acquire locks within the same w/w class. Common -definitions for methods #1 and #2: - -static DEFINE_WW_CLASS(ww_class); - -struct obj { - struct ww_mutex lock; - /* obj data */ -}; - -struct obj_entry { - struct list_head head; - struct obj *obj; -}; - -Method 1, using a list in execbuf->buffers that's not allowed to be reordered. -This is useful if a list of required objects is already tracked somewhere. -Furthermore the lock helper can use propagate the -EALREADY return code back to -the caller as a signal that an object is twice on the list. 
This is useful if -the list is constructed from userspace input and the ABI requires userspace to -not have duplicate entries (e.g. for a gpu commandbuffer submission ioctl). - -int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) -{ - struct obj *res_obj = NULL; - struct obj_entry *contended_entry = NULL; - struct obj_entry *entry; - - ww_acquire_init(ctx, &ww_class); - -retry: - list_for_each_entry (entry, list, head) { - if (entry->obj == res_obj) { - res_obj = NULL; - continue; - } - ret = ww_mutex_lock(&entry->obj->lock, ctx); - if (ret < 0) { - contended_entry = entry; - goto err; - } - } - - ww_acquire_done(ctx); - return 0; - -err: - list_for_each_entry_continue_reverse (entry, list, head) - ww_mutex_unlock(&entry->obj->lock); - - if (res_obj) - ww_mutex_unlock(&res_obj->lock); - - if (ret == -EDEADLK) { - /* we lost out in a seqno race, lock and retry.. */ - ww_mutex_lock_slow(&contended_entry->obj->lock, ctx); - res_obj = contended_entry->obj; - goto retry; - } - ww_acquire_fini(ctx); - - return ret; -} - -Method 2, using a list in execbuf->buffers that can be reordered. Same semantics -of duplicate entry detection using -EALREADY as method 1 above. But the -list-reordering allows for a bit more idiomatic code. - -int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) -{ - struct obj_entry *entry, *entry2; - - ww_acquire_init(ctx, &ww_class); - - list_for_each_entry (entry, list, head) { - ret = ww_mutex_lock(&entry->obj->lock, ctx); - if (ret < 0) { - entry2 = entry; - - list_for_each_entry_continue_reverse (entry2, list, head) - ww_mutex_unlock(&entry2->obj->lock); - - if (ret != -EDEADLK) { - ww_acquire_fini(ctx); - return ret; - } - - /* we lost out in a seqno race, lock and retry.. */ - ww_mutex_lock_slow(&entry->obj->lock, ctx); - - /* - * Move buf to head of the list, this will point - * buf->next to the first unlocked entry, - * restarting the for loop. - */ - list_del(&entry->head); - list_add(&entry->head, list); - } - } - - ww_acquire_done(ctx); - return 0; -} - -Unlocking works the same way for both methods #1 and #2: - -void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) -{ - struct obj_entry *entry; - - list_for_each_entry (entry, list, head) - ww_mutex_unlock(&entry->obj->lock); - - ww_acquire_fini(ctx); -} - -Method 3 is useful if the list of objects is constructed ad-hoc and not upfront, -e.g. when adjusting edges in a graph where each node has its own ww_mutex lock, -and edges can only be changed when holding the locks of all involved nodes. w/w -mutexes are a natural fit for such a case for two reasons: -- They can handle lock-acquisition in any order which allows us to start walking - a graph from a starting point and then iteratively discovering new edges and - locking down the nodes those edges connect to. -- Due to the -EALREADY return code signalling that a given objects is already - held there's no need for additional book-keeping to break cycles in the graph - or keep track off which looks are already held (when using more than one node - as a starting point). - -Note that this approach differs in two important ways from the above methods: -- Since the list of objects is dynamically constructed (and might very well be - different when retrying due to hitting the -EDEADLK wound condition) there's - no need to keep any object on a persistent list when it's not locked. We can - therefore move the list_head into the object itself. 
-- On the other hand the dynamic object list construction also means that the -EALREADY return - code can't be propagated. - -Note also that methods #1 and #2 and method #3 can be combined, e.g. to first lock a -list of starting nodes (passed in from userspace) using one of the above -methods. And then lock any additional objects affected by the operations using -method #3 below. The backoff/retry procedure will be a bit more involved, since -when the dynamic locking step hits -EDEADLK we also need to unlock all the -objects acquired with the fixed list. But the w/w mutex debug checks will catch -any interface misuse for these cases. - -Also, method 3 can't fail the lock acquisition step since it doesn't return --EALREADY. Of course this would be different when using the _interruptible -variants, but that's outside of the scope of these examples here. - -struct obj { - struct ww_mutex ww_mutex; - struct list_head locked_list; -}; - -static DEFINE_WW_CLASS(ww_class); - -void __unlock_objs(struct list_head *list) -{ - struct obj *entry, *temp; - - list_for_each_entry_safe (entry, temp, list, locked_list) { - /* need to do that before unlocking, since only the current lock holder is - allowed to use object */ - list_del(&entry->locked_list); - ww_mutex_unlock(entry->ww_mutex) - } -} - -void lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) -{ - struct obj *obj; - - ww_acquire_init(ctx, &ww_class); - -retry: - /* re-init loop start state */ - loop { - /* magic code which walks over a graph and decides which objects - * to lock */ - - ret = ww_mutex_lock(obj->ww_mutex, ctx); - if (ret == -EALREADY) { - /* we have that one already, get to the next object */ - continue; - } - if (ret == -EDEADLK) { - __unlock_objs(list); - - ww_mutex_lock_slow(obj, ctx); - list_add(&entry->locked_list, list); - goto retry; - } - - /* locked a new object, add it to the list */ - list_add_tail(&entry->locked_list, list); - } - - ww_acquire_done(ctx); - return 0; -} - -void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) -{ - __unlock_objs(list); - ww_acquire_fini(ctx); -} - -Method 4: Only lock one single objects. In that case deadlock detection and -prevention is obviously overkill, since with grabbing just one lock you can't -produce a deadlock within just one class. To simplify this case the w/w mutex -api can be used with a NULL context. - -Implementation Details ----------------------- - -Design: - ww_mutex currently encapsulates a struct mutex, this means no extra overhead for - normal mutex locks, which are far more common. As such there is only a small - increase in code size if wait/wound mutexes are not used. - - In general, not much contention is expected. The locks are typically used to - serialize access to resources for devices. The only way to make wakeups - smarter would be at the cost of adding a field to struct mutex_waiter. This - would add overhead to all cases where normal mutexes are used, and - ww_mutexes are generally less performance sensitive. - -Lockdep: - Special care has been taken to warn for as many cases of api abuse - as possible. Some common api abuses will be caught with - CONFIG_DEBUG_MUTEXES, but CONFIG_PROVE_LOCKING is recommended. - - Some of the errors which will be warned about: - - Forgetting to call ww_acquire_fini or ww_acquire_init. - - Attempting to lock more mutexes after ww_acquire_done. - - Attempting to lock the wrong mutex after -EDEADLK and - unlocking all mutexes. 
- - Attempting to lock the right mutex after -EDEADLK, - before unlocking all mutexes. - - - Calling ww_mutex_lock_slow before -EDEADLK was returned. - - - Unlocking mutexes with the wrong unlock function. - - Calling one of the ww_acquire_* twice on the same context. - - Using a different ww_class for the mutex than for the ww_acquire_ctx. - - Normal lockdep errors that can result in deadlocks. - - Some of the lockdep errors that can result in deadlocks: - - Calling ww_acquire_init to initialize a second ww_acquire_ctx before - having called ww_acquire_fini on the first. - - 'normal' deadlocks that can occur. - -FIXME: Update this section once we have the TASK_DEADLOCK task state flag magic -implemented. diff --git a/MAINTAINERS b/MAINTAINERS index 1acc624ecfd7..aac481fcbf5c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5523,8 +5523,8 @@ M: Ingo Molnar L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking S: Maintained -F: Documentation/lockdep*.txt -F: Documentation/lockstat.txt +F: Documentation/locking/lockdep*.txt +F: Documentation/locking/lockstat.txt F: include/linux/lockdep.h F: kernel/locking/ diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 0dc57d5ecd10..3a02e5e3e9f3 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -35,7 +35,7 @@ * of extra utility/tracking out of our acquire-ctx. This is provided * by drm_modeset_lock / drm_modeset_acquire_ctx. * - * For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt + * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt * * The basic usage pattern is to: * diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 008388f920d7..f388481201cd 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -4,7 +4,7 @@ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * - * see Documentation/lockdep-design.txt for more details. + * see Documentation/locking/lockdep-design.txt for more details. */ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H diff --git a/include/linux/mutex.h b/include/linux/mutex.h index e4c29418f407..cc31498fc526 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -133,7 +133,7 @@ static inline int mutex_is_locked(struct mutex *lock) /* * See kernel/locking/mutex.c for detailed documentation of these APIs. - * Also see Documentation/mutex-design.txt. + * Also see Documentation/locking/mutex-design.txt. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 035d3c57fc8a..8f498cdde280 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -149,7 +149,7 @@ extern void downgrade_write(struct rw_semaphore *sem); * static then another method for expressing nested locking is * the explicit definition of lock class keys and the use of * lockdep_set_class() at lock initialization time. - * See Documentation/lockdep-design.txt for more details.) + * See Documentation/locking/lockdep-design.txt for more details.) 
*/ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 0d8b6ed93874..dadbf88c22c4 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -15,7 +15,7 @@ * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale * and Sven Dietrich. * - * Also see Documentation/mutex-design.txt. + * Also see Documentation/locking/mutex-design.txt. */ #include #include diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index a0ea2a141b3b..7c98873a3077 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -8,7 +8,7 @@ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen * - * See Documentation/rt-mutex-design.txt for details. + * See Documentation/locking/rt-mutex-design.txt for details. */ #include #include diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 901096d31c66..9b94a063e26c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -924,7 +924,7 @@ config PROVE_LOCKING the proof of observed correctness is also maintained for an arbitrary combination of these separate locking variants. - For more details, see Documentation/lockdep-design.txt. + For more details, see Documentation/locking/lockdep-design.txt. config LOCKDEP bool @@ -945,7 +945,7 @@ config LOCK_STAT help This feature enables tracking lock contention points - For more details, see Documentation/lockstat.txt + For more details, see Documentation/locking/lockstat.txt This also enables lock events required by "perf lock", subcommand of perf. -- cgit v1.2.3-59-g8ed1b From ebef9c1236170fb3ba4dc05ccb41f6d842eb1c12 Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Fri, 8 Aug 2014 17:32:45 +0530 Subject: MAINTAINERS: update maintainers info adds the mailing list address for bluetooth 6loWPAN and IEEE-802.15.4 subsystems. Also adds web page info. 
Signed-off-by: Varka Bhadram Signed-off-by: Marcel Holtmann --- MAINTAINERS | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 2f85f55c8fb8..5cc0b81e4a33 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -152,8 +152,8 @@ F: drivers/scsi/53c700* 6LOWPAN GENERIC (BTLE/IEEE 802.15.4) M: Alexander Aring -L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-bluetooth@vger.kernel.org +L: linux-wpan@vger.kernel.org S: Maintained F: net/6lowpan/ F: include/net/6lowpan.h @@ -4564,13 +4564,14 @@ F: drivers/idle/i7300_idle.c IEEE 802.15.4 SUBSYSTEM M: Alexander Aring -L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) -W: http://apps.sourceforge.net/trac/linux-zigbee -T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git +L: linux-wpan@vger.kernel.org +W: https://github.com/linux-wpan +T: git git://github.com/linux-wpan/linux-wpan-next.git S: Maintained F: net/ieee802154/ F: net/mac802154/ F: drivers/net/ieee802154/ +F: Documentation/networking/ieee802154.txt IGUANAWORKS USB IR TRANSCEIVER M: Sean Young -- cgit v1.2.3-59-g8ed1b From 5cdeb2c837ddcf5b67692816952d0f38e29c2d7b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 16 Aug 2014 12:31:11 +0100 Subject: regulator: Restore L: linux-kernel@vger.kernel.org entry As with commit 981c3a4ff85 (MAINTAINERS: Restore "L: linux-kernel@vger.kernel.org" entries) restore the mailing list entry for the regulator framework in order to assist users in finding the list if they read the file instead of using get_maintainers.pl. Signed-off-by: Mark Brown --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..c42f647a5fe9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9827,6 +9827,7 @@ F: drivers/scsi/vmw_pvscsi.h VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood M: Mark Brown +L: linux-kernel@vger.kernel.org W: http://opensource.wolfsonmicro.com/node/15 W: http://www.slimlogic.co.uk/?p=48 T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git -- cgit v1.2.3-59-g8ed1b From dd060bc92748ce77231b2cd2657510b77cd94dea Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 16 Aug 2014 12:30:58 +0100 Subject: regmap: Restore L: linux-kernel@vger.kernel.org entry As with commit 981c3a4ff85 (MAINTAINERS: Restore "L: linux-kernel@vger.kernel.org" entries) restore the mailing list entry for the regmap framework in order to assist users in finding the list if they read the file instead of using get_maintainers.pl. Signed-off-by: Mark Brown --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..0bb827ac690b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7550,6 +7550,7 @@ F: fs/reiserfs/ REGISTER MAP ABSTRACTION M: Mark Brown +L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git S: Supported F: drivers/base/regmap/ -- cgit v1.2.3-59-g8ed1b From f7b98477f613a69b74ba4a715856630cc6508c0d Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 4 Aug 2014 15:51:48 +0900 Subject: ARM: shmobile: Remove genmai_defconfig from MAINTAINERS The genmai defconfig file has been removed by 3ed27bd90d6d0c8b ("ARM: shmobile: genmai: remove defconfig") so remove its entry in the MAINTAINERS accordingly. 
Reported-by: Joe Perches Signed-off-by: Simon Horman --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..99b78075a50c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1355,7 +1355,6 @@ F: arch/arm/boot/dts/sh* F: arch/arm/configs/ape6evm_defconfig F: arch/arm/configs/armadillo800eva_defconfig F: arch/arm/configs/bockw_defconfig -F: arch/arm/configs/genmai_defconfig F: arch/arm/configs/koelsch_defconfig F: arch/arm/configs/kzm9g_defconfig F: arch/arm/configs/lager_defconfig -- cgit v1.2.3-59-g8ed1b From f0d61161620019599868a5840df16a9d483a96cf Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 18 Aug 2014 09:29:00 +0100 Subject: MAINTAINERS: Add designated reviewers for IIO subsystem Add those persons who generally tend to review new IIO patches to the list of designated reviewers to make sure that they are Cc'ed on new patches. Signed-off-by: Lars-Peter Clausen Acked-by: Peter Meerwald Acked-by: Hartmut Knaack Signed-off-by: Jonathan Cameron --- MAINTAINERS | 3 +++ 1 file changed, 3 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 6d2c52ea407d..23aee9f6ec47 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4537,6 +4537,9 @@ F: drivers/media/rc/iguanair.c IIO SUBSYSTEM AND DRIVERS M: Jonathan Cameron +R: Hartmut Knaack +R: Lars-Peter Clausen +R: Peter Meerwald L: linux-iio@vger.kernel.org S: Maintained F: drivers/iio/ -- cgit v1.2.3-59-g8ed1b From 4ce72abc6ea768d6f214456adcd7e0a293cbc065 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 18 Aug 2014 09:08:00 +0100 Subject: MAINTAINERS: Add entry for Analog Devices IIO drivers Add Michael and myself as the maintainer for the Analog Devices IIO drivers. The entry matches on all files in drivers/staging/iio and drivers/iio/ starting with the 'ad' prefix, except for 'adjd' as that one is used by Avago Technologies. Signed-off-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron --- MAINTAINERS | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 23aee9f6ec47..316218cd9d1f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -666,6 +666,17 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://blackfin.uclinux.org/ S: Supported F: sound/soc/blackfin/* + +ANALOG DEVICES INC IIO DRIVERS +M: Lars-Peter Clausen +M: Michael Hennerich +W: http://wiki.analog.com/ +W: http://ez.analog.com/community/linux-device-drivers +S: Supported +F: drivers/iio/*/ad* +X: drivers/iio/*/adjd* +F: drivers/staging/iio/*/ad* +F: staging/iio/trigger/iio-trig-bfin-timer.c AOA (Apple Onboard Audio) ALSA DRIVER M: Johannes Berg -- cgit v1.2.3-59-g8ed1b From 08223d80df38e666a42d7c82eb340db55c6e03bd Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 19 Aug 2014 06:07:56 -0700 Subject: dmaengine maintainer update I am stepping down as dmaengine maintainer as the bulk of the activity in the subsystem is primarily targeted at the slave-dma case handled by Vinod, and I have recently been unable to give the few patches I do receive timely review. There is still an item in my backlog to eliminate the async_tx api and the constraints it poses on dmaengine drivers, but I need not hold on to the maintainer role in the meantime. I will still be subscribed to dmaengine@vger.kernel.org to answer questions, but all patches should be routed through Vinod unless/until a maintainer for the non-slave-dma use case arrives. 
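As background for the async_tx API referred to above, here is a rough, self-contained sketch of an offloaded copy along the lines of Documentation/crypto/async-tx-api.txt. The function names, the completion-based wait and the zero page offsets are illustrative assumptions, not anything taken from this commit:

#include <linux/async_tx.h>
#include <linux/completion.h>

/* Illustrative names only. */
static void copy_done(void *param)
{
        complete(param);
}

static void offload_copy(struct page *dst, struct page *src, size_t len,
                         struct completion *done)
{
        struct async_submit_ctl submit;

        /*
         * If a capable DMA channel is registered the copy is offloaded and
         * copy_done() runs from the channel's completion callback; otherwise
         * the core falls back to a synchronous memcpy and invokes the
         * callback directly. ASYNC_TX_ACK lets the core reclaim the
         * descriptor, so the returned descriptor can be ignored here.
         */
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, copy_done, done, NULL);
        async_memcpy(dst, src, 0, 0, len, &submit);
        async_tx_issue_pending_all();
}

Either way the caller simply waits on the completion, which is the point of the API and of the constraint on dmaengine drivers mentioned above: the same call works whether or not an offload channel is present.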
It is non-entirely clear at this point that there is enough work going forward for a separate maintainer of the pure-offload case. Ongoing development of the ioatdma driver is handled by Dave. I'm still interested in reviewing ioatdma patches, but he is the primary maintainer/developer going forward. IOP platforms are not generating any traffic in my inbox, but if a patch did arrive I've long since lost access to hardware. Cc: Andrew Morton Cc: Vinod Koul Cc: Dave Jiang Signed-off-by: Dan Williams --- MAINTAINERS | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b3fdb0f004ba..8873ca652521 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -970,24 +970,20 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c ARM/INTEL IOP32X ARM ARCHITECTURE M: Lennert Buytenhek -M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP33X ARM ARCHITECTURE -M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained +S: Orphan ARM/INTEL IOP13XX ARM ARCHITECTURE M: Lennert Buytenhek -M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IQ81342EX MACHINE SUPPORT M: Lennert Buytenhek -M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -1012,7 +1008,6 @@ F: drivers/pcmcia/pxa2xx_stargate2.c ARM/INTEL XSC3 (MANZANO) ARM CORE M: Lennert Buytenhek -M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -1444,9 +1439,9 @@ F: drivers/platform/x86/asus*.c F: drivers/platform/x86/eeepc*.c ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API -M: Dan Williams +R: Dan Williams W: http://sourceforge.net/projects/xscaleiop -S: Maintained +S: Odd fixes F: Documentation/crypto/async-tx-api.txt F: crypto/async_tx/ F: drivers/dma/ @@ -2778,13 +2773,11 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git DMA GENERIC OFFLOAD ENGINE SUBSYSTEM M: Vinod Koul -M: Dan Williams L: dmaengine@vger.kernel.org Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ -S: Supported +S: Maintained F: drivers/dma/ F: include/linux/dma* -T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma) DME1737 HARDWARE MONITOR DRIVER @@ -4500,8 +4493,8 @@ F: arch/x86/kernel/microcode_core.c F: arch/x86/kernel/microcode_intel.c INTEL I/OAT DMA DRIVER -M: Dan Williams M: Dave Jiang +R: Dan Williams L: dmaengine@vger.kernel.org Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ S: Supported @@ -4516,7 +4509,7 @@ F: drivers/iommu/intel-iommu.c F: include/linux/intel-iommu.h INTEL IOP-ADMA DMA DRIVER -M: Dan Williams +R: Dan Williams S: Odd fixes F: drivers/dma/iop-adma.c -- cgit v1.2.3-59-g8ed1b From ba2b7d0ad59f4e3fb2619f840b8e89060ab5dc61 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Wed, 6 Aug 2014 16:31:27 -0500 Subject: MAINTAINERS: update entries for ARM/SOCFPGA platform Update email address, add W and T entries. 
Signed-off-by: Dinh Nguyen --- MAINTAINERS | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1ff06dee651d..dd19acdb618a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1367,12 +1367,15 @@ F: arch/arm/mach-shmobile/ F: drivers/sh/ ARM/SOCFPGA ARCHITECTURE -M: Dinh Nguyen +M: Dinh Nguyen S: Maintained F: arch/arm/mach-socfpga/ +W: http://www.rocketboards.org +T: git://git.rocketboards.org/linux-socfpga.git +T: git://git.rocketboards.org/linux-socfpga-next.git ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT -M: Dinh Nguyen +M: Dinh Nguyen S: Maintained F: drivers/clk/socfpga/ -- cgit v1.2.3-59-g8ed1b From 1eb3b2167433a7ae1950c6ed4cc4aaad30498f09 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 11 Aug 2014 13:14:25 +0300 Subject: MAINTAINTERS: The NFC list is subscribers-only It's not moderated, it's subscribers-only. Signed-off-by: Dan Carpenter Signed-off-by: John W. Linville --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b0a9003a1334..0b7007a9c8a4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6357,7 +6357,7 @@ M: Lauro Ramos Venancio M: Aloisio Almeida Jr M: Samuel Ortiz L: linux-wireless@vger.kernel.org -L: linux-nfc@lists.01.org (moderated for non-subscribers) +L: linux-nfc@lists.01.org (subscribers-only) S: Supported F: net/nfc/ F: include/net/nfc/ -- cgit v1.2.3-59-g8ed1b From 9f35a3ccb7b6aeb0fa2df73213b004df870ca466 Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Wed, 27 Aug 2014 12:43:21 -0400 Subject: MAINTAINERS: Update group email alias for qlcnic driver Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f01f54f27750..c9b4b55bcbb3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7361,7 +7361,7 @@ F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER M: Shahed Shaikh -M: Dept-HSGLinuxNICDev@qlogic.com +M: Dept-GELinuxNICDev@qlogic.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlcnic/ -- cgit v1.2.3-59-g8ed1b From f5e5de1e9edf1ac47c4e13a4ff864e76d8eb1fe1 Mon Sep 17 00:00:00 2001 From: Oleg Drokin Date: Fri, 15 Aug 2014 12:48:14 -0400 Subject: lustre: Add MAINTAINERS entry Just add the entry with some info. 
Signed-off-by: Oleg Drokin Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1c047098d887..ddca4abd67db 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8669,6 +8669,14 @@ W: http://www.lirc.org/ S: Odd Fixes F: drivers/staging/media/lirc/ +STAGING - LUSTRE PARALLEL FILESYSTEM +M: Oleg Drokin +M: Andreas Dilger +L: HPDD-discuss@lists.01.org (moderated for non-subscribers) +W: http://lustre.opensfs.org/ +S: Maintained +F: drivers/staging/lustre + STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) M: Julian Andres Klode M: Marc Dietrich -- cgit v1.2.3-59-g8ed1b From d696b7f780b66135e34a4dcd781828acf907f6df Mon Sep 17 00:00:00 2001 From: Eli Billauer Date: Tue, 2 Sep 2014 14:36:37 +0300 Subject: MAINTAINERS: Add an entry for staging/xillybus Suggested-by: Greg Kroah-Hartman Signed-off-by: Eli Billauer Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3d43db19efde..f040e7f3af85 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8789,6 +8789,12 @@ M: Arnaud Patard S: Odd Fixes F: drivers/staging/xgifb/ +STAGING - XILLYBUS DRIVER +M: Eli Billauer +L: devel@driverdev.osuosl.org +S: Supported +F: drivers/staging/xillybus/ + STARFIRE/DURALAN NETWORK DRIVER M: Ion Badulescu S: Odd Fixes -- cgit v1.2.3-59-g8ed1b From cbce710709f2b57cb955a98c0d3fad1559c4d93d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 28 Aug 2014 12:17:42 +0200 Subject: MAINTAINERS: Update Daniel Vetter's email address Signed-off-by: Daniel Vetter --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1ff06dee651d..655e927c087e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3086,7 +3086,7 @@ F: include/drm/drm_panel.h F: Documentation/devicetree/bindings/panel/ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) -M: Daniel Vetter +M: Daniel Vetter M: Jani Nikula L: intel-gfx@lists.freedesktop.org L: dri-devel@lists.freedesktop.org -- cgit v1.2.3-59-g8ed1b From 5b5a9069e889cd87887cbd3ced6353281c7686b6 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 28 Aug 2014 09:56:47 +0200 Subject: MAINTAINERS: Add irqchip DT bindings doc path to IRQCHIP DRIVERS section Signed-off-by: Geert Uytterhoeven Link: https://lkml.kernel.org/r/1409212607-4021-1-git-send-email-geert+renesas@glider.be Signed-off-by: Jason Cooper --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..71e86afa6820 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4948,6 +4948,7 @@ L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core T: git git://git.infradead.org/users/jcooper/linux.git irqchip/core +F: Documentation/devicetree/bindings/interrupt-controller/ F: drivers/irqchip/ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) -- cgit v1.2.3-59-g8ed1b From a0cfd75fdc46b56978ece383a7d6f6b04e9087ad Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 12 Aug 2014 15:41:17 -0700 Subject: seccomp: Add reviewers to MAINTAINERS This adds two reviewers to the seccomp tree. 
Signed-off-by: Kees Cook --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 55762cba8516..992335ca0b2c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7955,6 +7955,8 @@ F: drivers/mmc/host/sdhci-pltfm.[ch] SECURE COMPUTING M: Kees Cook +R: Andy Lutomirski +R: Will Drewry T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git seccomp S: Supported F: kernel/seccomp.c -- cgit v1.2.3-59-g8ed1b From 71bcada88b0f3c7f11fa5b8a4b30ae66dbfabbf3 Mon Sep 17 00:00:00 2001 From: Thor Thayer Date: Wed, 3 Sep 2014 10:27:54 -0500 Subject: edac: altera: Add Altera SDRAM EDAC support This patch adds support for the CycloneV and ArriaV SDRAM controllers. Correction and reporting of SBEs, Panic on DBEs. There was a discussion thread on whether this driver should be an mfd driver or just make use of syscon, which is already a mfd. Ultimately, the decision to use a simple syscon interface was reached.[1] [1] https://lkml.org/lkml/2014/7/30/514 [dinguyen] Fixed Kconfig to have EDAC_ALTERA_MC as a tristate to prevent a build failure for allmodconfig. Signed-off-by: Thor Thayer Acked-by: Borislav Petkov [dinguyen] cleaned up commit message Signed-off-by: Dinh Nguyen --- MAINTAINERS | 5 + drivers/edac/Kconfig | 9 + drivers/edac/Makefile | 2 + drivers/edac/altera_edac.c | 410 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 426 insertions(+) create mode 100644 drivers/edac/altera_edac.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1ff06dee651d..a5dd2d61777a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1376,6 +1376,11 @@ M: Dinh Nguyen S: Maintained F: drivers/clk/socfpga/ +ARM/SOCFPGA EDAC SUPPORT +M: Thor Thayer +S: Maintained +F: drivers/edac/altera_edac. + ARM/STI ARCHITECTURE M: Srinivas Kandagatla M: Maxime Coquelin diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index fd89ca982748..7072c2892d63 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -376,4 +376,13 @@ config EDAC_OCTEON_PCI Support for error detection and correction on the Cavium Octeon family of SOCs. +config EDAC_ALTERA_MC + tristate "Altera SDRAM Memory Controller EDAC" + depends on EDAC_MM_EDAC && ARCH_SOCFPGA + help + Support for error detection and correction on the + Altera SDRAM memory controller. Note that the + preloader must initialize the SDRAM before loading + the kernel. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index c479a24d8f77..359aa499b200 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -65,3 +65,5 @@ obj-$(CONFIG_EDAC_OCTEON_PC) += octeon_edac-pc.o obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o + +obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c new file mode 100644 index 000000000000..3c4929fda9d5 --- /dev/null +++ b/drivers/edac/altera_edac.c @@ -0,0 +1,410 @@ +/* + * Copyright Altera Corporation (C) 2014. All rights reserved. + * Copyright 2011-2012 Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + * Adapted from the highbank_mc_edac driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "edac_core.h" +#include "edac_module.h" + +#define EDAC_MOD_STR "altera_edac" +#define EDAC_VERSION "1" + +/* SDRAM Controller CtrlCfg Register */ +#define CTLCFG_OFST 0x00 + +/* SDRAM Controller CtrlCfg Register Bit Masks */ +#define CTLCFG_ECC_EN 0x400 +#define CTLCFG_ECC_CORR_EN 0x800 +#define CTLCFG_GEN_SB_ERR 0x2000 +#define CTLCFG_GEN_DB_ERR 0x4000 + +#define CTLCFG_ECC_AUTO_EN (CTLCFG_ECC_EN | \ + CTLCFG_ECC_CORR_EN) + +/* SDRAM Controller Address Width Register */ +#define DRAMADDRW_OFST 0x2C + +/* SDRAM Controller Address Widths Field Register */ +#define DRAMADDRW_COLBIT_MASK 0x001F +#define DRAMADDRW_COLBIT_SHIFT 0 +#define DRAMADDRW_ROWBIT_MASK 0x03E0 +#define DRAMADDRW_ROWBIT_SHIFT 5 +#define DRAMADDRW_BANKBIT_MASK 0x1C00 +#define DRAMADDRW_BANKBIT_SHIFT 10 +#define DRAMADDRW_CSBIT_MASK 0xE000 +#define DRAMADDRW_CSBIT_SHIFT 13 + +/* SDRAM Controller Interface Data Width Register */ +#define DRAMIFWIDTH_OFST 0x30 + +/* SDRAM Controller Interface Data Width Defines */ +#define DRAMIFWIDTH_16B_ECC 24 +#define DRAMIFWIDTH_32B_ECC 40 + +/* SDRAM Controller DRAM Status Register */ +#define DRAMSTS_OFST 0x38 + +/* SDRAM Controller DRAM Status Register Bit Masks */ +#define DRAMSTS_SBEERR 0x04 +#define DRAMSTS_DBEERR 0x08 +#define DRAMSTS_CORR_DROP 0x10 + +/* SDRAM Controller DRAM IRQ Register */ +#define DRAMINTR_OFST 0x3C + +/* SDRAM Controller DRAM IRQ Register Bit Masks */ +#define DRAMINTR_INTREN 0x01 +#define DRAMINTR_SBEMASK 0x02 +#define DRAMINTR_DBEMASK 0x04 +#define DRAMINTR_CORRDROPMASK 0x08 +#define DRAMINTR_INTRCLR 0x10 + +/* SDRAM Controller Single Bit Error Count Register */ +#define SBECOUNT_OFST 0x40 + +/* SDRAM Controller Single Bit Error Count Register Bit Masks */ +#define SBECOUNT_MASK 0x0F + +/* SDRAM Controller Double Bit Error Count Register */ +#define DBECOUNT_OFST 0x44 + +/* SDRAM Controller Double Bit Error Count Register Bit Masks */ +#define DBECOUNT_MASK 0x0F + +/* SDRAM Controller ECC Error Address Register */ +#define ERRADDR_OFST 0x48 + +/* SDRAM Controller ECC Error Address Register Bit Masks */ +#define ERRADDR_MASK 0xFFFFFFFF + +/* Altera SDRAM Memory Controller data */ +struct altr_sdram_mc_data { + struct regmap *mc_vbase; +}; + +static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id) +{ + struct mem_ctl_info *mci = dev_id; + struct altr_sdram_mc_data *drvdata = mci->pvt_info; + u32 status, err_count, err_addr; + + /* Error Address is shared by both SBE & DBE */ + regmap_read(drvdata->mc_vbase, ERRADDR_OFST, &err_addr); + + regmap_read(drvdata->mc_vbase, DRAMSTS_OFST, &status); + + if (status & DRAMSTS_DBEERR) { + regmap_read(drvdata->mc_vbase, DBECOUNT_OFST, &err_count); + panic("\nEDAC: [%d Uncorrectable errors @ 0x%08X]\n", + err_count, err_addr); + } + if (status & DRAMSTS_SBEERR) { + regmap_read(drvdata->mc_vbase, SBECOUNT_OFST, &err_count); + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count, + err_addr >> PAGE_SHIFT, + err_addr & ~PAGE_MASK, 0, + 0, 0, -1, mci->ctl_name, ""); + } + + regmap_write(drvdata->mc_vbase, DRAMINTR_OFST, + (DRAMINTR_INTRCLR | DRAMINTR_INTREN)); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_EDAC_DEBUG +static ssize_t altr_sdr_mc_err_inject_write(struct file 
*file, + const char __user *data, + size_t count, loff_t *ppos) +{ + struct mem_ctl_info *mci = file->private_data; + struct altr_sdram_mc_data *drvdata = mci->pvt_info; + u32 *ptemp; + dma_addr_t dma_handle; + u32 reg, read_reg; + + ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL); + if (!ptemp) { + dma_free_coherent(mci->pdev, 16, ptemp, dma_handle); + edac_printk(KERN_ERR, EDAC_MC, + "Inject: Buffer Allocation error\n"); + return -ENOMEM; + } + + regmap_read(drvdata->mc_vbase, CTLCFG_OFST, &read_reg); + read_reg &= ~(CTLCFG_GEN_SB_ERR | CTLCFG_GEN_DB_ERR); + + /* Error are injected by writing a word while the SBE or DBE + * bit in the CTLCFG register is set. Reading the word will + * trigger the SBE or DBE error and the corresponding IRQ. + */ + if (count == 3) { + edac_printk(KERN_ALERT, EDAC_MC, + "Inject Double bit error\n"); + regmap_write(drvdata->mc_vbase, CTLCFG_OFST, + (read_reg | CTLCFG_GEN_DB_ERR)); + } else { + edac_printk(KERN_ALERT, EDAC_MC, + "Inject Single bit error\n"); + regmap_write(drvdata->mc_vbase, CTLCFG_OFST, + (read_reg | CTLCFG_GEN_SB_ERR)); + } + + ptemp[0] = 0x5A5A5A5A; + ptemp[1] = 0xA5A5A5A5; + + /* Clear the error injection bits */ + regmap_write(drvdata->mc_vbase, CTLCFG_OFST, read_reg); + /* Ensure it has been written out */ + wmb(); + + /* + * To trigger the error, we need to read the data back + * (the data was written with errors above). + * The ACCESS_ONCE macros and printk are used to prevent the + * the compiler optimizing these reads out. + */ + reg = ACCESS_ONCE(ptemp[0]); + read_reg = ACCESS_ONCE(ptemp[1]); + /* Force Read */ + rmb(); + + edac_printk(KERN_ALERT, EDAC_MC, "Read Data [0x%X, 0x%X]\n", + reg, read_reg); + + dma_free_coherent(mci->pdev, 16, ptemp, dma_handle); + + return count; +} + +static const struct file_operations altr_sdr_mc_debug_inject_fops = { + .open = simple_open, + .write = altr_sdr_mc_err_inject_write, + .llseek = generic_file_llseek, +}; + +static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci) +{ + if (mci->debugfs) + debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, + &altr_sdr_mc_debug_inject_fops); +} +#else +static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci) +{} +#endif + +/* Get total memory size in bytes */ +static u32 altr_sdram_get_total_mem_size(struct regmap *mc_vbase) +{ + u32 size, read_reg, row, bank, col, cs, width; + + if (regmap_read(mc_vbase, DRAMADDRW_OFST, &read_reg) < 0) + return 0; + + if (regmap_read(mc_vbase, DRAMIFWIDTH_OFST, &width) < 0) + return 0; + + col = (read_reg & DRAMADDRW_COLBIT_MASK) >> + DRAMADDRW_COLBIT_SHIFT; + row = (read_reg & DRAMADDRW_ROWBIT_MASK) >> + DRAMADDRW_ROWBIT_SHIFT; + bank = (read_reg & DRAMADDRW_BANKBIT_MASK) >> + DRAMADDRW_BANKBIT_SHIFT; + cs = (read_reg & DRAMADDRW_CSBIT_MASK) >> + DRAMADDRW_CSBIT_SHIFT; + + /* Correct for ECC as its not addressible */ + if (width == DRAMIFWIDTH_32B_ECC) + width = 32; + if (width == DRAMIFWIDTH_16B_ECC) + width = 16; + + /* calculate the SDRAM size base on this info */ + size = 1 << (row + bank + col); + size = size * cs * (width / 8); + return size; +} + +static int altr_sdram_probe(struct platform_device *pdev) +{ + struct edac_mc_layer layers[2]; + struct mem_ctl_info *mci; + struct altr_sdram_mc_data *drvdata; + struct regmap *mc_vbase; + struct dimm_info *dimm; + u32 read_reg, mem_size; + int irq; + int res = 0; + + /* Validate the SDRAM controller has ECC enabled */ + /* Grab the register range from the sdr controller in device tree */ + mc_vbase = 
syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "altr,sdr-syscon"); + if (IS_ERR(mc_vbase)) { + edac_printk(KERN_ERR, EDAC_MC, + "regmap for altr,sdr-syscon lookup failed.\n"); + return -ENODEV; + } + + if (regmap_read(mc_vbase, CTLCFG_OFST, &read_reg) || + ((read_reg & CTLCFG_ECC_AUTO_EN) != CTLCFG_ECC_AUTO_EN)) { + edac_printk(KERN_ERR, EDAC_MC, + "No ECC/ECC disabled [0x%08X]\n", read_reg); + return -ENODEV; + } + + /* Grab memory size from device tree. */ + mem_size = altr_sdram_get_total_mem_size(mc_vbase); + if (!mem_size) { + edac_printk(KERN_ERR, EDAC_MC, + "Unable to calculate memory size\n"); + return -ENODEV; + } + + /* Ensure the SDRAM Interrupt is disabled and cleared */ + if (regmap_write(mc_vbase, DRAMINTR_OFST, DRAMINTR_INTRCLR)) { + edac_printk(KERN_ERR, EDAC_MC, + "Error clearing SDRAM ECC IRQ\n"); + return -ENODEV; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + edac_printk(KERN_ERR, EDAC_MC, + "No irq %d in DT\n", irq); + return -ENODEV; + } + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = 1; + layers[0].is_virt_csrow = true; + layers[1].type = EDAC_MC_LAYER_CHANNEL; + layers[1].size = 1; + layers[1].is_virt_csrow = false; + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, + sizeof(struct altr_sdram_mc_data)); + if (!mci) + return -ENOMEM; + + mci->pdev = &pdev->dev; + drvdata = mci->pvt_info; + drvdata->mc_vbase = mc_vbase; + platform_set_drvdata(pdev, mci); + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + res = -ENOMEM; + goto free; + } + + mci->mtype_cap = MEM_FLAG_DDR3; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->mod_name = EDAC_MOD_STR; + mci->mod_ver = EDAC_VERSION; + mci->ctl_name = dev_name(&pdev->dev); + mci->scrub_mode = SCRUB_SW_SRC; + mci->dev_name = dev_name(&pdev->dev); + + dimm = *mci->dimms; + dimm->nr_pages = ((mem_size - 1) >> PAGE_SHIFT) + 1; + dimm->grain = 8; + dimm->dtype = DEV_X8; + dimm->mtype = MEM_DDR3; + dimm->edac_mode = EDAC_SECDED; + + res = edac_mc_add_mc(mci); + if (res < 0) + goto err; + + res = devm_request_irq(&pdev->dev, irq, altr_sdram_mc_err_handler, + 0, dev_name(&pdev->dev), mci); + if (res < 0) { + edac_mc_printk(mci, KERN_ERR, + "Unable to request irq %d\n", irq); + res = -ENODEV; + goto err2; + } + + if (regmap_write(drvdata->mc_vbase, DRAMINTR_OFST, + (DRAMINTR_INTRCLR | DRAMINTR_INTREN))) { + edac_mc_printk(mci, KERN_ERR, + "Error enabling SDRAM ECC IRQ\n"); + res = -ENODEV; + goto err2; + } + + altr_sdr_mc_create_debugfs_nodes(mci); + + devres_close_group(&pdev->dev, NULL); + + return 0; + +err2: + edac_mc_del_mc(&pdev->dev); +err: + devres_release_group(&pdev->dev, NULL); +free: + edac_mc_free(mci); + edac_printk(KERN_ERR, EDAC_MC, + "EDAC Probe Failed; Error %d\n", res); + + return res; +} + +static int altr_sdram_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci = platform_get_drvdata(pdev); + + edac_mc_del_mc(&pdev->dev); + edac_mc_free(mci); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id altr_sdram_ctrl_of_match[] = { + { .compatible = "altr,sdram-edac", }, + {}, +}; +MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match); + +static struct platform_driver altr_sdram_edac_driver = { + .probe = altr_sdram_probe, + .remove = altr_sdram_remove, + .driver = { + .name = "altr_sdram_edac", + .of_match_table = altr_sdram_ctrl_of_match, + }, +}; + +module_platform_driver(altr_sdram_edac_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Thor Thayer"); 
+MODULE_DESCRIPTION("EDAC Driver for Altera SDRAM Controller"); -- cgit v1.2.3-59-g8ed1b From 0c4ffcfe1fbc1ef564ec137eab21137cb013b00e Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Tue, 2 Sep 2014 17:26:19 -0600 Subject: PCI: keystone: Add TI Keystone PCIe driver The Keystone PCIe controller is based on v3.65 version of the Designware h/w. Main differences are: 1. No ATU support 2. Legacy and MSI IRQ functions are implemented in application register space 3. MSI interrupts are multiplexed over 8 IRQ lines to the Host side. All of the application register space handing code is organized into pci-keystone-dw.c and the functions are called from pci-keystone.c to implement PCI controller driver. Also add necessary DT documentation and update the MAINTAINERS file for the driver. [bhelgaas: spelling and whitespace fixes] Signed-off-by: Murali Karicheri Signed-off-by: Bjorn Helgaas Acked-by: Santosh Shilimkar CC: Russell King CC: Grant Likely CC: Rob Herring CC: Mohit Kumar CC: Pratyush Anand CC: Jingoo Han CC: Richard Zhu CC: Kishon Vijay Abraham I CC: Marek Vasut CC: Arnd Bergmann CC: Pawel Moll CC: Mark Rutland CC: Ian Campbell CC: Kumar Gala CC: Randy Dunlap CC: Grant Likely --- .../devicetree/bindings/pci/pci-keystone.txt | 68 +++ MAINTAINERS | 7 + drivers/pci/host/Kconfig | 10 + drivers/pci/host/Makefile | 1 + drivers/pci/host/pci-keystone-dw.c | 516 +++++++++++++++++++++ drivers/pci/host/pci-keystone.c | 386 +++++++++++++++ drivers/pci/host/pci-keystone.h | 58 +++ 7 files changed, 1046 insertions(+) create mode 100644 Documentation/devicetree/bindings/pci/pci-keystone.txt create mode 100644 drivers/pci/host/pci-keystone-dw.c create mode 100644 drivers/pci/host/pci-keystone.c create mode 100644 drivers/pci/host/pci-keystone.h (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt new file mode 100644 index 000000000000..ceb3e2424742 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt @@ -0,0 +1,68 @@ +TI Keystone PCIe interface + +Keystone PCI host Controller is based on Designware PCI h/w version 3.65. +It shares common functions with PCIe Designware core driver and inherit +common properties defined in +Documentation/devicetree/bindings/pci/designware-pci.txt + +Please refer to Documentation/devicetree/bindings/pci/designware-pci.txt +for the details of Designware DT bindings. Additional properties are +described here as well as properties that are not applicable. + +Required Properties:- + +compatibility: "ti,keystone-pcie" +reg: index 1 is the base address and length of DW application registers. + index 2 is the base address and length of PCI mode configuration + register. + index 3 is the base address and length of PCI device ID register. 
+ +pcie_msi_intc : Interrupt controller device node for MSI IRQ chip + interrupt-cells: should be set to 1 + interrupt-parent: Parent interrupt controller phandle + interrupts: GIC interrupt lines connected to PCI MSI interrupt lines + + Example: + pcie_msi_intc: msi-interrupt-controller { + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&gic>; + interrupts = , + , + , + , + , + , + , + ; + }; + +pcie_intc: Interrupt controller device node for Legacy IRQ chip + interrupt-cells: should be set to 1 + interrupt-parent: Parent interrupt controller phandle + interrupts: GIC interrupt lines connected to PCI Legacy interrupt lines + + Example: + pcie_intc: legacy-interrupt-controller { + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&gic>; + interrupts = , + , + , + ; + }; + +Optional properties:- + phys: phandle to Generic Keystone SerDes phy for PCI + phy-names: name of the Generic Keystine SerDes phy for PCI + - If boot loader already does PCI link establishment, then phys and + phy-names shouldn't be present. + +Designware DT Properties not applicable for Keystone PCI + +1. pcie_bus clock-names not used. Instead, a phandle to phys is used. + +Note for PCI driver usage +========================= +Driver requires pci=pcie_bus_perf in the bootargs for proper functioning. diff --git a/MAINTAINERS b/MAINTAINERS index 1ff06dee651d..d639df9701cf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6876,6 +6876,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/pci/host/*imx6* +PCI DRIVER FOR TI KEYSTONE +M: Murali Karicheri +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: drivers/pci/host/*keystone* + PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) M: Thomas Petazzoni M: Jason Cooper diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 8922c376456a..3fe8bf627f70 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -63,4 +63,14 @@ config PCIE_SPEAR13XX help Say Y here if you want PCIe support on SPEAr13XX SoCs. +config PCI_KEYSTONE + bool "TI Keystone PCIe controller" + depends on ARCH_KEYSTONE + select PCIE_DW + select PCIEPORTBUS + help + Say Y here if you want to enable PCI controller support on Keystone + SoCs. The PCI controller on Keystone is based on Designware hardware + and therefore the driver re-uses the Designware core functions to + implement the driver. endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index d0e88f114ff9..057ea60f5cdd 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o +obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c new file mode 100644 index 000000000000..2434786ca40b --- /dev/null +++ b/drivers/pci/host/pci-keystone-dw.c @@ -0,0 +1,516 @@ +/* + * Designware application register space functions for Keystone PCI controller + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
+ * http://www.ti.com + * + * Author: Murali Karicheri + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" +#include "pci-keystone.h" + +/* Application register defines */ +#define LTSSM_EN_VAL 1 +#define LTSSM_STATE_MASK 0x1f +#define LTSSM_STATE_L0 0x11 +#define DBI_CS2_EN_VAL 0x20 +#define OB_XLAT_EN_VAL 2 + +/* Application registers */ +#define CMD_STATUS 0x004 +#define CFG_SETUP 0x008 +#define OB_SIZE 0x030 +#define CFG_PCIM_WIN_SZ_IDX 3 +#define CFG_PCIM_WIN_CNT 32 +#define SPACE0_REMOTE_CFG_OFFSET 0x1000 +#define OB_OFFSET_INDEX(n) (0x200 + (8 * n)) +#define OB_OFFSET_HI(n) (0x204 + (8 * n)) + +/* IRQ register defines */ +#define IRQ_EOI 0x050 +#define IRQ_STATUS 0x184 +#define IRQ_ENABLE_SET 0x188 +#define IRQ_ENABLE_CLR 0x18c + +#define MSI_IRQ 0x054 +#define MSI0_IRQ_STATUS 0x104 +#define MSI0_IRQ_ENABLE_SET 0x108 +#define MSI0_IRQ_ENABLE_CLR 0x10c +#define IRQ_STATUS 0x184 +#define MSI_IRQ_OFFSET 4 + +/* Config space registers */ +#define DEBUG0 0x728 + +#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) + +static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) +{ + return sys->private_data; +} + +static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, + u32 *bit_pos) +{ + *reg_offset = offset % 8; + *bit_pos = offset >> 3; +} + +u32 ks_dw_pcie_get_msi_data(struct pcie_port *pp) +{ + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + + return ks_pcie->app.start + MSI_IRQ; +} + +void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) +{ + struct pcie_port *pp = &ks_pcie->pp; + u32 pending, vector; + int src, virq; + + pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4)); + + /* + * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit + * shows 1, 9, 17, 25 and so forth + */ + for (src = 0; src < 4; src++) { + if (BIT(src) & pending) { + vector = offset + (src << 3); + virq = irq_linear_revmap(pp->irq_domain, vector); + dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n", + src, vector, virq); + generic_handle_irq(virq); + } + } +} + +static void ks_dw_pcie_msi_irq_ack(struct irq_data *d) +{ + u32 offset, reg_offset, bit_pos; + struct keystone_pcie *ks_pcie; + unsigned int irq = d->irq; + struct msi_desc *msi; + struct pcie_port *pp; + + msi = irq_get_msi_desc(irq); + pp = sys_to_pcie(msi->dev->bus->sysdata); + ks_pcie = to_keystone_pcie(pp); + offset = irq - irq_linear_revmap(pp->irq_domain, 0); + update_reg_offset_bit_pos(offset, ®_offset, &bit_pos); + + writel(BIT(bit_pos), + ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4)); + writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI); +} + +void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) +{ + u32 reg_offset, bit_pos; + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + + update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); + writel(BIT(bit_pos), + ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4)); +} + +void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) +{ + u32 reg_offset, bit_pos; + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + + update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); + writel(BIT(bit_pos), + ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4)); +} + +static void 
ks_dw_pcie_msi_irq_mask(struct irq_data *d) +{ + struct keystone_pcie *ks_pcie; + unsigned int irq = d->irq; + struct msi_desc *msi; + struct pcie_port *pp; + u32 offset; + + msi = irq_get_msi_desc(irq); + pp = sys_to_pcie(msi->dev->bus->sysdata); + ks_pcie = to_keystone_pcie(pp); + offset = irq - irq_linear_revmap(pp->irq_domain, 0); + + /* Mask the end point if PVM implemented */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (msi->msi_attrib.maskbit) + mask_msi_irq(d); + } + + ks_dw_pcie_msi_clear_irq(pp, offset); +} + +static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d) +{ + struct keystone_pcie *ks_pcie; + unsigned int irq = d->irq; + struct msi_desc *msi; + struct pcie_port *pp; + u32 offset; + + msi = irq_get_msi_desc(irq); + pp = sys_to_pcie(msi->dev->bus->sysdata); + ks_pcie = to_keystone_pcie(pp); + offset = irq - irq_linear_revmap(pp->irq_domain, 0); + + /* Mask the end point if PVM implemented */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (msi->msi_attrib.maskbit) + unmask_msi_irq(d); + } + + ks_dw_pcie_msi_set_irq(pp, offset); +} + +static struct irq_chip ks_dw_pcie_msi_irq_chip = { + .name = "Keystone-PCIe-MSI-IRQ", + .irq_ack = ks_dw_pcie_msi_irq_ack, + .irq_mask = ks_dw_pcie_msi_irq_mask, + .irq_unmask = ks_dw_pcie_msi_irq_unmask, +}; + +static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + set_irq_flags(irq, IRQF_VALID); + + return 0; +} + +const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = { + .map = ks_dw_pcie_msi_map, +}; + +int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip) +{ + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + int i; + + pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np, + MAX_MSI_IRQS, + &ks_dw_pcie_msi_domain_ops, + chip); + if (!pp->irq_domain) { + dev_err(pp->dev, "irq domain init failed\n"); + return -ENXIO; + } + + for (i = 0; i < MAX_MSI_IRQS; i++) + irq_create_mapping(pp->irq_domain, i); + + return 0; +} + +void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) +{ + int i; + + for (i = 0; i < MAX_LEGACY_IRQS; i++) + writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4)); +} + +void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) +{ + struct pcie_port *pp = &ks_pcie->pp; + u32 pending; + int virq; + + pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4)); + + if (BIT(0) & pending) { + virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); + dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset, + virq); + generic_handle_irq(virq); + } + + /* EOI the INTx interrupt */ + writel(offset, ks_pcie->va_app_base + IRQ_EOI); +} + +static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) +{ +} + +static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d) +{ +} + +static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d) +{ +} + +static struct irq_chip ks_dw_pcie_legacy_irq_chip = { + .name = "Keystone-PCI-Legacy-IRQ", + .irq_ack = ks_dw_pcie_ack_legacy_irq, + .irq_mask = ks_dw_pcie_mask_legacy_irq, + .irq_unmask = ks_dw_pcie_unmask_legacy_irq, +}; + +static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hw_irq) +{ + irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, d->host_data); + set_irq_flags(irq, IRQF_VALID); + + return 0; +} + +static const struct irq_domain_ops 
ks_dw_pcie_legacy_irq_domain_ops = { + .map = ks_dw_pcie_init_legacy_irq_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +/** + * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask + * registers + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. + */ +static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt) +{ + u32 val; + + writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS), + reg_virt + CMD_STATUS); + + do { + val = readl(reg_virt + CMD_STATUS); + } while (!(val & DBI_CS2_EN_VAL)); +} + +/** + * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. + */ +static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt) +{ + u32 val; + + writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS), + reg_virt + CMD_STATUS); + + do { + val = readl(reg_virt + CMD_STATUS); + } while (val & DBI_CS2_EN_VAL); +} + +void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) +{ + struct pcie_port *pp = &ks_pcie->pp; + u32 start = pp->mem.start, end = pp->mem.end; + int i, tr_size; + + /* Disable BARs for inbound access */ + ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base); + writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0); + writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1); + ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base); + + /* Set outbound translation size per window division */ + writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE); + + tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M; + + /* Using Direct 1:1 mapping of RC <-> PCI memory space */ + for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) { + writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i)); + writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i)); + start += tr_size; + } + + /* Enable OB translation */ + writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS), + ks_pcie->va_app_base + CMD_STATUS); +} + +/** + * ks_pcie_cfg_setup() - Set up configuration space address for a device + * + * @ks_pcie: ptr to keystone_pcie structure + * @bus: Bus number the device is residing on + * @devfn: device, function number info + * + * Forms and returns the address of configuration space mapped in PCIESS + * address space 0. Also configures CFG_SETUP for remote configuration space + * access. + * + * The address space has two regions to access configuration - local and remote. + * We access local region for bus 0 (as RC is attached on bus 0) and remote + * region for others with TYPE 1 access when bus > 1. As for device on bus = 1, + * we will do TYPE 0 access as it will be on our secondary bus (logical). + * CFG_SETUP is needed only for remote configuration access. + */ +static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus, + unsigned int devfn) +{ + u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn); + struct pcie_port *pp = &ks_pcie->pp; + u32 regval; + + if (bus == 0) + return pp->dbi_base; + + regval = (bus << 16) | (device << 8) | function; + + /* + * Since Bus#1 will be a virtual bus, we need to have TYPE0 + * access only. 
+ * TYPE 1 + */ + if (bus != 1) + regval |= BIT(24); + + writel(regval, ks_pcie->va_app_base + CFG_SETUP); + return pp->va_cfg0_base; +} + +int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val) +{ + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + u8 bus_num = bus->number; + void __iomem *addr; + + addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); + + return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val); +} + +int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val) +{ + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + u8 bus_num = bus->number; + void __iomem *addr; + + addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); + + return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val); +} + +/** + * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization + * + * This sets BAR0 to enable inbound access for MSI_IRQ register + */ +void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) +{ + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + + /* Configure and set up BAR0 */ + ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base); + + /* Enable BAR0 */ + writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0); + writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0); + + ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base); + + /* + * For BAR0, just setting bus address for inbound writes (MSI) should + * be sufficient. Use physical address to avoid any conflicts. + */ + writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0); +} + +/** + * ks_dw_pcie_link_up() - Check if link up + */ +int ks_dw_pcie_link_up(struct pcie_port *pp) +{ + u32 val = readl(pp->dbi_base + DEBUG0); + + return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0; +} + +void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) +{ + u32 val; + + /* Disable Link training */ + val = readl(ks_pcie->va_app_base + CMD_STATUS); + val &= ~LTSSM_EN_VAL; + writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS); + + /* Initiate Link Training */ + val = readl(ks_pcie->va_app_base + CMD_STATUS); + writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS); +} + +/** + * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware + * + * Ioremap the register resources, initialize legacy irq domain + * and call dw_pcie_v3_65_host_init() API to initialize the Keystone + * PCI host controller. + */ +int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, + struct device_node *msi_intc_np) +{ + struct pcie_port *pp = &ks_pcie->pp; + struct platform_device *pdev = to_platform_device(pp->dev); + struct resource *res; + + /* Index 0 is the config reg. space address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pp->dbi_base = devm_ioremap_resource(pp->dev, res); + if (IS_ERR(pp->dbi_base)) + return PTR_ERR(pp->dbi_base); + + /* + * We set these same and is used in pcie rd/wr_other_conf + * functions + */ + pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET; + pp->va_cfg1_base = pp->va_cfg0_base; + + /* Index 1 is the application reg. 
space address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ks_pcie->app = *res; + ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res); + if (IS_ERR(ks_pcie->va_app_base)) + return PTR_ERR(ks_pcie->va_app_base); + + /* Create legacy IRQ domain */ + ks_pcie->legacy_irq_domain = + irq_domain_add_linear(ks_pcie->legacy_intc_np, + MAX_LEGACY_IRQS, + &ks_dw_pcie_legacy_irq_domain_ops, + NULL); + if (!ks_pcie->legacy_irq_domain) { + dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n"); + return -EINVAL; + } + + return dw_pcie_host_init(pp); +} diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c new file mode 100644 index 000000000000..c1cfaef7faf3 --- /dev/null +++ b/drivers/pci/host/pci-keystone.c @@ -0,0 +1,386 @@ +/* + * PCIe host controller driver for Texas Instruments Keystone SoCs + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. + * http://www.ti.com + * + * Author: Murali Karicheri + * Implementation based on pci-exynos.c and pcie-designware.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" +#include "pci-keystone.h" + +#define DRIVER_NAME "keystone-pcie" + +/* driver specific constants */ +#define MAX_MSI_HOST_IRQS 8 +#define MAX_LEGACY_HOST_IRQS 4 + +/* RC mode settings masks */ +#define PCIE_RC_MODE BIT(2) +#define PCIE_MODE_MASK (BIT(1) | BIT(2)) + +/* DEV_STAT_CTRL */ +#define PCIE_CAP_BASE 0x70 + +#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) + +static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) +{ + struct pcie_port *pp = &ks_pcie->pp; + int count = 200; + + dw_pcie_setup_rc(pp); + + if (dw_pcie_link_up(pp)) { + dev_err(pp->dev, "Link already up\n"); + return 0; + } + + ks_dw_pcie_initiate_link_train(ks_pcie); + /* check if the link is up or not */ + while (!dw_pcie_link_up(pp)) { + usleep_range(100, 1000); + if (--count) { + ks_dw_pcie_initiate_link_train(ks_pcie); + continue; + } + dev_err(pp->dev, "phy link never came up\n"); + return -EINVAL; + } + + return 0; +} + +static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); + u32 offset = irq - ks_pcie->msi_host_irqs[0]; + struct pcie_port *pp = &ks_pcie->pp; + struct irq_chip *chip = irq_desc_get_chip(desc); + + dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq); + + /* + * The chained irq handler installation would have replaced normal + * interrupt driver handler so we need to take care of mask/unmask and + * ack operation. + */ + chained_irq_enter(chip, desc); + ks_dw_pcie_handle_msi_irq(ks_pcie, offset); + chained_irq_exit(chip, desc); +} + +/** + * ks_pcie_legacy_irq_handler() - Handle legacy interrupt + * @irq: IRQ line for legacy interrupts + * @desc: Pointer to irq descriptor + * + * Traverse through pending legacy interrupts and invoke handler for each. Also + * takes care of interrupt controller level mask/ack operation. 
+ */ +static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); + struct pcie_port *pp = &ks_pcie->pp; + u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; + struct irq_chip *chip = irq_desc_get_chip(desc); + + dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq); + + /* + * The chained irq handler installation would have replaced normal + * interrupt driver handler so we need to take care of mask/unmask and + * ack operation. + */ + chained_irq_enter(chip, desc); + ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset); + chained_irq_exit(chip, desc); +} + +static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, + char *controller, int *num_irqs) +{ + int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL; + struct device *dev = ks_pcie->pp.dev; + struct device_node *np_pcie = dev->of_node, **np_temp; + + if (!strcmp(controller, "msi-interrupt-controller")) + legacy = 0; + + if (legacy) { + np_temp = &ks_pcie->legacy_intc_np; + max_host_irqs = MAX_LEGACY_HOST_IRQS; + host_irqs = &ks_pcie->legacy_host_irqs[0]; + } else { + np_temp = &ks_pcie->msi_intc_np; + max_host_irqs = MAX_MSI_HOST_IRQS; + host_irqs = &ks_pcie->msi_host_irqs[0]; + } + + /* interrupt controller is in a child node */ + *np_temp = of_find_node_by_name(np_pcie, controller); + if (!(*np_temp)) { + dev_err(dev, "Node for %s is absent\n", controller); + goto out; + } + temp = of_irq_count(*np_temp); + if (!temp) + goto out; + if (temp > max_host_irqs) + dev_warn(dev, "Too many %s interrupts defined %u\n", + (legacy ? "legacy" : "MSI"), temp); + + /* + * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to + * 7 (MSI) + */ + for (temp = 0; temp < max_host_irqs; temp++) { + host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); + if (host_irqs[temp] < 0) + break; + } + if (temp) { + *num_irqs = temp; + ret = 0; + } +out: + return ret; +} + +static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) +{ + int i; + + /* Legacy IRQ */ + for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { + irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie); + irq_set_chained_handler(ks_pcie->legacy_host_irqs[i], + ks_pcie_legacy_irq_handler); + } + ks_dw_pcie_enable_legacy_irqs(ks_pcie); + + /* MSI IRQ */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { + irq_set_chained_handler(ks_pcie->msi_host_irqs[i], + ks_pcie_msi_irq_handler); + irq_set_handler_data(ks_pcie->msi_host_irqs[i], + ks_pcie); + } + } +} + +/* + * When a PCI device does not exist during config cycles, keystone host gets a + * bus error instead of returning 0xffffffff. This handler always returns 0 + * for this kind of faults. 
+ */ +static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + unsigned long instr = *(unsigned long *) instruction_pointer(regs); + + if ((instr & 0x0e100090) == 0x00100090) { + int reg = (instr >> 12) & 15; + + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + } + + return 0; +} + +static void __init ks_pcie_host_init(struct pcie_port *pp) +{ + u32 vendor_device_id, val; + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + + ks_pcie_establish_link(ks_pcie); + ks_dw_pcie_setup_rc_app_regs(ks_pcie); + ks_pcie_setup_interrupts(ks_pcie); + writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), + pp->dbi_base + PCI_IO_BASE); + + /* update the Vendor ID */ + vendor_device_id = readl(ks_pcie->va_reg_pciid); + writew((vendor_device_id >> 16), pp->dbi_base + PCI_DEVICE_ID); + + /* update the DEV_STAT_CTRL to publish right mrrs */ + val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); + val &= ~PCI_EXP_DEVCTL_READRQ; + /* set the mrrs to 256 bytes */ + val |= BIT(12); + writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); + + /* + * PCIe access errors that result into OCP errors are caught by ARM as + * "External aborts" + */ + hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, + "Asynchronous external abort"); +} + +static struct pcie_host_ops keystone_pcie_host_ops = { + .rd_other_conf = ks_dw_pcie_rd_other_conf, + .wr_other_conf = ks_dw_pcie_wr_other_conf, + .link_up = ks_dw_pcie_link_up, + .host_init = ks_pcie_host_init, + .msi_set_irq = ks_dw_pcie_msi_set_irq, + .msi_clear_irq = ks_dw_pcie_msi_clear_irq, + .get_msi_data = ks_dw_pcie_get_msi_data, + .msi_host_init = ks_dw_pcie_msi_host_init, + .scan_bus = ks_dw_pcie_v3_65_scan_bus, +}; + +static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, + struct platform_device *pdev) +{ + struct pcie_port *pp = &ks_pcie->pp; + int ret; + + ret = ks_pcie_get_irq_controller_info(ks_pcie, + "legacy-interrupt-controller", + &ks_pcie->num_legacy_host_irqs); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + ret = ks_pcie_get_irq_controller_info(ks_pcie, + "msi-interrupt-controller", + &ks_pcie->num_msi_host_irqs); + if (ret) + return ret; + } + + pp->root_bus_nr = -1; + pp->ops = &keystone_pcie_host_ops; + ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); + if (ret) { + dev_err(&pdev->dev, "failed to initialize host\n"); + return ret; + } + + return ret; +} + +static const struct of_device_id ks_pcie_of_match[] = { + { + .type = "pci", + .compatible = "ti,keystone-pcie", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, ks_pcie_of_match); + +static int __exit ks_pcie_remove(struct platform_device *pdev) +{ + struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); + + clk_disable_unprepare(ks_pcie->clk); + + return 0; +} + +static int __init ks_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct keystone_pcie *ks_pcie; + struct pcie_port *pp; + struct resource *res; + void __iomem *reg_p; + struct phy *phy; + int ret = 0; + u32 val; + + ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie), + GFP_KERNEL); + if (!ks_pcie) { + dev_err(dev, "no memory for keystone pcie\n"); + return -ENOMEM; + } + pp = &ks_pcie->pp; + + /* index 2 is the devcfg register for RC mode settings */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + reg_p = devm_ioremap_resource(dev, res); + if (IS_ERR(reg_p)) + return PTR_ERR(reg_p); + + /* enable RC mode in devcfg */ + val = readl(reg_p); + val &= ~PCIE_MODE_MASK; + val |= PCIE_RC_MODE; + writel(val, 
reg_p); + + /* initialize SerDes Phy if present */ + phy = devm_phy_get(dev, "pcie-phy"); + if (!IS_ERR_OR_NULL(phy)) { + ret = phy_init(phy); + if (ret < 0) + return ret; + } + + /* index 3 is to read PCI DEVICE_ID */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 3); + reg_p = devm_ioremap_resource(dev, res); + if (IS_ERR(reg_p)) + return PTR_ERR(reg_p); + ks_pcie->va_reg_pciid = reg_p; + + pp->dev = dev; + platform_set_drvdata(pdev, ks_pcie); + ks_pcie->clk = devm_clk_get(dev, "pcie"); + if (IS_ERR(ks_pcie->clk)) { + dev_err(dev, "Failed to get pcie rc clock\n"); + return PTR_ERR(ks_pcie->clk); + } + ret = clk_prepare_enable(ks_pcie->clk); + if (ret) + return ret; + + ret = ks_add_pcie_port(ks_pcie, pdev); + if (ret < 0) + goto fail_clk; + + return 0; +fail_clk: + clk_disable_unprepare(ks_pcie->clk); + + return ret; +} + +static struct platform_driver ks_pcie_driver __refdata = { + .probe = ks_pcie_probe, + .remove = __exit_p(ks_pcie_remove), + .driver = { + .name = "keystone-pcie", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ks_pcie_of_match), + }, +}; + +module_platform_driver(ks_pcie_driver); + +MODULE_AUTHOR("Murali Karicheri "); +MODULE_DESCRIPTION("Keystone PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h new file mode 100644 index 000000000000..729ea7d3994b --- /dev/null +++ b/drivers/pci/host/pci-keystone.h @@ -0,0 +1,58 @@ +/* + * Keystone PCI Controller's common includes + * + * Copyright (C) 2013-2014 Texas Instruments., Ltd. + * http://www.ti.com + * + * Author: Murali Karicheri + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define MAX_LEGACY_IRQS 4 +#define MAX_MSI_HOST_IRQS 8 +#define MAX_LEGACY_HOST_IRQS 4 + +struct keystone_pcie { + struct clk *clk; + struct pcie_port pp; + void __iomem *va_reg_pciid; + + int num_legacy_host_irqs; + int legacy_host_irqs[MAX_LEGACY_HOST_IRQS]; + struct device_node *legacy_intc_np; + + int num_msi_host_irqs; + int msi_host_irqs[MAX_MSI_HOST_IRQS]; + struct device_node *msi_intc_np; + struct irq_domain *legacy_irq_domain; + + /* Application register space */ + void __iomem *va_app_base; + struct resource app; +}; + +/* Keystone DW specific MSI controller APIs/definitions */ +void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); +u32 ks_dw_pcie_get_msi_data(struct pcie_port *pp); + +/* Keystone specific PCI controller APIs */ +void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); +void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); +int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, + struct device_node *msi_intc_np); +int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); +int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val); +void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie); +int ks_dw_pcie_link_up(struct pcie_port *pp); +void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie); +void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); +void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); +void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); +int ks_dw_pcie_msi_host_init(struct pcie_port *pp, + struct msi_chip *chip); -- cgit v1.2.3-59-g8ed1b From eb11adabcfa0019ce0a5f124d282f624d58b4376 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Fri, 5 Sep 2014 16:36:32 +0100 Subject: MAINTAINERS: Add dwc3-st.c file to ARCH/STI architecture This patch adds the new dwc3-st.c glue driver found on STMicroelectronics stih407 consumer electronics SoC's into the STI arch section of the maintainers file. 
Signed-off-by: Peter Griffin Acked-by: Lee Jones Signed-off-by: Felipe Balbi --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..4aa5cbc30c8e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1392,6 +1392,7 @@ F: drivers/media/rc/st_rc.c F: drivers/i2c/busses/i2c-st.c F: drivers/tty/serial/st-asc.c F: drivers/mmc/host/sdhci-st.c +F: drivers/usb/dwc3/dwc3-st.c ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT M: Lennert Buytenhek -- cgit v1.2.3-59-g8ed1b From c65fde192257d3007030c1aca5f4953235394e74 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sun, 10 Aug 2014 05:16:39 -0300 Subject: [media] MAINTAINERS: add tw68 entry Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..78b38e9e5e0c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9227,6 +9227,14 @@ T: git git://linuxtv.org/media_tree.git S: Odd fixes F: drivers/media/usb/tm6000/ +TW68 VIDEO4LINUX DRIVER +M: Hans Verkuil +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +W: http://linuxtv.org +S: Odd Fixes +F: drivers/media/pci/tw68/ + TPM DEVICE DRIVER M: Peter Huewe M: Ashley Lai -- cgit v1.2.3-59-g8ed1b From 3a83c16ef0e03e2ca2f1ce547a7cba53a62d0e0d Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Tue, 9 Sep 2014 02:02:09 +0300 Subject: usb: gadget: uvc: separately compile some components of f_uvc Compile uvc_queue, uvc_v4l2, uvc_video separately so that later they can be all combined in a separately compiled f_uvc. Signed-off-by: Andrzej Pietrasiewicz Tested-by: Michael Grzeschik [Make uvc_v4l2_ioctl_ops non-static] [Rename __UVC__V4L2__H__ and __UVC__VIDEO__H__] [Update MAINTAINERS] Signed-off-by: Laurent Pinchart Acked-by: Andrzej Pietrasiewicz Signed-off-by: Felipe Balbi --- MAINTAINERS | 2 +- drivers/usb/gadget/function/f_uvc.c | 2 ++ drivers/usb/gadget/function/f_uvc.h | 8 +++++++ drivers/usb/gadget/function/uvc.h | 1 + drivers/usb/gadget/function/uvc_queue.c | 39 ++++++++++++++------------------- drivers/usb/gadget/function/uvc_queue.h | 33 ++++++++++++++++++++++++++++ drivers/usb/gadget/function/uvc_v4l2.c | 6 +++-- drivers/usb/gadget/function/uvc_v4l2.h | 22 +++++++++++++++++++ drivers/usb/gadget/function/uvc_video.c | 10 ++++----- drivers/usb/gadget/function/uvc_video.h | 24 ++++++++++++++++++++ drivers/usb/gadget/legacy/Makefile | 2 +- drivers/usb/gadget/legacy/webcam.c | 6 +---- 12 files changed, 118 insertions(+), 37 deletions(-) create mode 100644 drivers/usb/gadget/function/uvc_v4l2.h create mode 100644 drivers/usb/gadget/function/uvc_video.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 5d66c037be18..96568a8f961b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9669,7 +9669,7 @@ USB WEBCAM GADGET M: Laurent Pinchart L: linux-usb@vger.kernel.org S: Maintained -F: drivers/usb/gadget/function/*uvc*.c +F: drivers/usb/gadget/function/*uvc* F: drivers/usb/gadget/legacy/webcam.c USB WIRELESS RNDIS DRIVER (rndis_wlan) diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index ae5bcb49dddb..825080d271a7 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -27,6 +27,8 @@ #include #include "uvc.h" +#include "uvc_v4l2.h" +#include "uvc_video.h" unsigned int uvc_gadget_trace_param; static unsigned int streaming_interval; diff --git 
a/drivers/usb/gadget/function/f_uvc.h b/drivers/usb/gadget/function/f_uvc.h index 74b9602ef2d8..71b38dd54ede 100644 --- a/drivers/usb/gadget/function/f_uvc.h +++ b/drivers/usb/gadget/function/f_uvc.h @@ -16,6 +16,14 @@ #include #include +#include "uvc.h" + +void uvc_function_setup_continue(struct uvc_device *uvc); + +void uvc_function_connect(struct uvc_device *uvc); + +void uvc_function_disconnect(struct uvc_device *uvc); + int uvc_bind_config(struct usb_configuration *c, const struct uvc_descriptor_header * const *fs_control, const struct uvc_descriptor_header * const *hs_control, diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h index 0a283b1237d5..f67695cb28f8 100644 --- a/drivers/usb/gadget/function/uvc.h +++ b/drivers/usb/gadget/function/uvc.h @@ -53,6 +53,7 @@ struct uvc_event #ifdef __KERNEL__ #include /* For usb_endpoint_* */ +#include #include #include #include diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c index 1e1bc73dba1f..8ea8b3b227b4 100644 --- a/drivers/usb/gadget/function/uvc_queue.c +++ b/drivers/usb/gadget/function/uvc_queue.c @@ -126,8 +126,7 @@ static struct vb2_ops uvc_queue_qops = { .wait_finish = uvc_wait_finish, }; -static int uvcg_queue_init(struct uvc_video_queue *queue, - enum v4l2_buf_type type) +int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type) { int ret; @@ -154,7 +153,7 @@ static int uvcg_queue_init(struct uvc_video_queue *queue, /* * Free the video buffers. */ -static void uvcg_free_buffers(struct uvc_video_queue *queue) +void uvcg_free_buffers(struct uvc_video_queue *queue) { mutex_lock(&queue->mutex); vb2_queue_release(&queue->queue); @@ -164,7 +163,7 @@ static void uvcg_free_buffers(struct uvc_video_queue *queue) /* * Allocate the video buffers. */ -static int uvcg_alloc_buffers(struct uvc_video_queue *queue, +int uvcg_alloc_buffers(struct uvc_video_queue *queue, struct v4l2_requestbuffers *rb) { int ret; @@ -176,8 +175,7 @@ static int uvcg_alloc_buffers(struct uvc_video_queue *queue, return ret ? ret : rb->count; } -static int uvcg_query_buffer(struct uvc_video_queue *queue, - struct v4l2_buffer *buf) +int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) { int ret; @@ -188,8 +186,7 @@ static int uvcg_query_buffer(struct uvc_video_queue *queue, return ret; } -static int uvcg_queue_buffer(struct uvc_video_queue *queue, - struct v4l2_buffer *buf) +int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) { unsigned long flags; int ret; @@ -213,8 +210,8 @@ done: * Dequeue a video buffer. If nonblocking is false, block until a buffer is * available. */ -static int uvcg_dequeue_buffer(struct uvc_video_queue *queue, - struct v4l2_buffer *buf, int nonblocking) +int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf, + int nonblocking) { int ret; @@ -231,8 +228,8 @@ static int uvcg_dequeue_buffer(struct uvc_video_queue *queue, * This function implements video queue polling and is intended to be used by * the device poll handler. 
*/ -static unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, - struct file *file, poll_table *wait) +unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file, + poll_table *wait) { unsigned int ret; @@ -243,8 +240,7 @@ static unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, return ret; } -static int uvcg_queue_mmap(struct uvc_video_queue *queue, - struct vm_area_struct *vma) +int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) { int ret; @@ -261,9 +257,8 @@ static int uvcg_queue_mmap(struct uvc_video_queue *queue, * * NO-MMU arch need this function to make mmap() work correctly. */ -static unsigned long uvcg_queue_get_unmapped_area( - struct uvc_video_queue *queue, - unsigned long pgoff) +unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue, + unsigned long pgoff) { unsigned long ret; @@ -286,7 +281,7 @@ static unsigned long uvcg_queue_get_unmapped_area( * This function acquires the irq spinlock and can be called from interrupt * context. */ -static void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect) +void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect) { struct uvc_buffer *buf; unsigned long flags; @@ -327,7 +322,7 @@ static void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect) * This function can't be called from interrupt context. Use * uvcg_queue_cancel() instead. */ -static int uvcg_queue_enable(struct uvc_video_queue *queue, int enable) +int uvcg_queue_enable(struct uvc_video_queue *queue, int enable) { unsigned long flags; int ret = 0; @@ -364,8 +359,8 @@ done: } /* called with &queue_irqlock held.. */ -static struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue, - struct uvc_buffer *buf) +struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue, + struct uvc_buffer *buf) { struct uvc_buffer *nextbuf; @@ -393,7 +388,7 @@ static struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue, return nextbuf; } -static struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue) +struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue) { struct uvc_buffer *buf = NULL; diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h index 8e76ce982f1e..03919c724961 100644 --- a/drivers/usb/gadget/function/uvc_queue.h +++ b/drivers/usb/gadget/function/uvc_queue.h @@ -57,6 +57,39 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue) return vb2_is_streaming(&queue->queue); } +int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type); + +void uvcg_free_buffers(struct uvc_video_queue *queue); + +int uvcg_alloc_buffers(struct uvc_video_queue *queue, + struct v4l2_requestbuffers *rb); + +int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf); + +int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf); + +int uvcg_dequeue_buffer(struct uvc_video_queue *queue, + struct v4l2_buffer *buf, int nonblocking); + +unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, + struct file *file, poll_table *wait); + +int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue, + unsigned long pgoff); +#endif /* CONFIG_MMU */ + +void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect); + +int uvcg_queue_enable(struct uvc_video_queue *queue, int enable); + +struct uvc_buffer 
*uvcg_queue_next_buffer(struct uvc_video_queue *queue, + struct uvc_buffer *buf); + +struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue); + #endif /* __KERNEL__ */ #endif /* _UVC_QUEUE_H_ */ diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c index c85730eb36dd..b52f2681ec21 100644 --- a/drivers/usb/gadget/function/uvc_v4l2.c +++ b/drivers/usb/gadget/function/uvc_v4l2.c @@ -23,8 +23,10 @@ #include #include +#include "f_uvc.h" #include "uvc.h" #include "uvc_queue.h" +#include "uvc_video.h" /* -------------------------------------------------------------------------- * Requests handling @@ -259,7 +261,7 @@ uvc_v4l2_ioctl_default(struct file *file, void *fh, bool valid_prio, } } -static const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops = { +const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops = { .vidioc_querycap = uvc_v4l2_querycap, .vidioc_g_fmt_vid_out = uvc_v4l2_get_format, .vidioc_s_fmt_vid_out = uvc_v4l2_set_format, @@ -350,7 +352,7 @@ static unsigned long uvc_v4l2_get_unmapped_area(struct file *file, } #endif -static struct v4l2_file_operations uvc_v4l2_fops = { +struct v4l2_file_operations uvc_v4l2_fops = { .owner = THIS_MODULE, .open = uvc_v4l2_open, .release = uvc_v4l2_release, diff --git a/drivers/usb/gadget/function/uvc_v4l2.h b/drivers/usb/gadget/function/uvc_v4l2.h new file mode 100644 index 000000000000..2683b92fda65 --- /dev/null +++ b/drivers/usb/gadget/function/uvc_v4l2.h @@ -0,0 +1,22 @@ +/* + * uvc_v4l2.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * Author: Andrzej Pietrasiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __UVC_V4L2_H__ +#define __UVC_V4L2_H__ + +extern const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops; +extern struct v4l2_file_operations uvc_v4l2_fops; + +#endif /* __UVC_V4L2_H__ */ diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index be6749ec6a9d..b0528e8138cf 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -279,8 +280,7 @@ error: * This function fills the available USB requests (listed in req_free) with * video data from the queued buffers. */ -static int -uvcg_video_pump(struct uvc_video *video) +int uvcg_video_pump(struct uvc_video *video) { struct uvc_video_queue *queue = &video->queue; struct usb_request *req; @@ -339,8 +339,7 @@ uvcg_video_pump(struct uvc_video *video) /* * Enable or disable the video stream. */ -static int -uvcg_video_enable(struct uvc_video *video, int enable) +int uvcg_video_enable(struct uvc_video *video, int enable) { unsigned int i; int ret; @@ -378,8 +377,7 @@ uvcg_video_enable(struct uvc_video *video, int enable) /* * Initialize the UVC video stream. 
*/ -static int -uvcg_video_init(struct uvc_video *video) +int uvcg_video_init(struct uvc_video *video) { INIT_LIST_HEAD(&video->req_free); spin_lock_init(&video->req_lock); diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h new file mode 100644 index 000000000000..ef00f06fa00b --- /dev/null +++ b/drivers/usb/gadget/function/uvc_video.h @@ -0,0 +1,24 @@ +/* + * uvc_video.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * Author: Andrzej Pietrasiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __UVC_VIDEO_H__ +#define __UVC_VIDEO_H__ + +int uvcg_video_pump(struct uvc_video *video); + +int uvcg_video_enable(struct uvc_video *video, int enable); + +int uvcg_video_init(struct uvc_video *video); + +#endif /* __UVC_VIDEO_H__ */ diff --git a/drivers/usb/gadget/legacy/Makefile b/drivers/usb/gadget/legacy/Makefile index 7f485f25705e..ed7367e95eec 100644 --- a/drivers/usb/gadget/legacy/Makefile +++ b/drivers/usb/gadget/legacy/Makefile @@ -19,7 +19,7 @@ g_multi-y := multi.o g_hid-y := hid.o g_dbgp-y := dbgp.o g_nokia-y := nokia.o -g_webcam-y := webcam.o +g_webcam-y := webcam.o ../function/uvc_queue.o ../function/uvc_v4l2.o ../function/uvc_video.o g_ncm-y := ncm.o g_acm_ms-y := acm_ms.o g_tcm_usb_gadget-y := tcm_usb_gadget.o diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c index f826622d1dc2..5d02849b1f67 100644 --- a/drivers/usb/gadget/legacy/webcam.c +++ b/drivers/usb/gadget/legacy/webcam.c @@ -12,10 +12,9 @@ #include #include +#include #include -#include "f_uvc.h" - /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use @@ -23,9 +22,6 @@ * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. */ -#include "uvc_queue.c" -#include "uvc_video.c" -#include "uvc_v4l2.c" #include "f_uvc.c" USB_GADGET_COMPOSITE_OPTIONS(); -- cgit v1.2.3-59-g8ed1b From 3d598f47e804a77208c6bb0a454123018e2f2281 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 19 Aug 2014 20:29:12 +0300 Subject: dmaengine: dw: move dw_dmac.h to where it belongs to There is a common storage for platform data related structures and definitions inside kernel source tree. The patch moves file from include/linux to include/linux/platform_data and renames it acoordingly. The users are also updated. 
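For illustration only, not part of the original changelog: after this move the only change a user needs is the include path; the structure layout itself is unchanged. A minimal sketch of platform code filling in the controller configuration is shown below. The symbol name example_dw_pdata and the values are invented for the example, while the field names and the CHAN_* constants come from the header shown in the diff that follows.

  #include <linux/platform_data/dma-dw.h>    /* formerly <linux/dw_dmac.h> */

  static struct dw_dma_platform_data example_dw_pdata = {
          .nr_channels            = 8,
          .is_private             = true,
          .chan_allocation_order  = CHAN_ALLOCATION_ASCENDING,
          .chan_priority          = CHAN_PRIORITY_ASCENDING,
          .block_size             = 4095,
          .nr_masters             = 2,
          .data_width             = { 2, 2 },  /* 2 == 32 bits per the header comment */
  };
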
Signed-off-by: Andy Shevchenko Acked-by: Viresh Kumar [For the arch/avr32/.* and .*sound/atmel.*] Acked-by: Hans-Christian Egtvedt Signed-off-by: Vinod Koul --- MAINTAINERS | 2 +- arch/avr32/mach-at32ap/at32ap700x.c | 2 +- arch/avr32/mach-at32ap/include/mach/atmel-mci.h | 2 +- drivers/dma/dw/internal.h | 2 +- drivers/dma/dw/regs.h | 6 +- include/linux/dw_dmac.h | 111 ------------------------ include/linux/platform_data/dma-dw.h | 111 ++++++++++++++++++++++++ include/sound/atmel-abdac.h | 2 +- include/sound/atmel-ac97c.h | 2 +- sound/atmel/abdac.c | 2 +- sound/atmel/ac97c.c | 2 +- 11 files changed, 122 insertions(+), 122 deletions(-) delete mode 100644 include/linux/dw_dmac.h create mode 100644 include/linux/platform_data/dma-dw.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..a10072963889 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7875,7 +7875,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER M: Viresh Kumar M: Andy Shevchenko S: Maintained -F: include/linux/dw_dmac.h +F: include/linux/platform_data/dma-dw.h F: drivers/dma/dw/ SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index db85b5ec3351..2a1aa712c6e7 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -7,7 +7,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h index 4bba58561d5c..11d7f4b28dc8 100644 --- a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h +++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h @@ -1,7 +1,7 @@ #ifndef __MACH_ATMEL_MCI_H #define __MACH_ATMEL_MCI_H -#include +#include /** * struct mci_dma_data - DMA data for MCI interface diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h index 32667f9e0dda..43cc1dfad5c9 100644 --- a/drivers/dma/dw/internal.h +++ b/drivers/dma/dw/internal.h @@ -12,7 +12,7 @@ #define _DW_DMAC_INTERNAL_H #include -#include +#include #include "regs.h" diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index bb98d3e91e8b..af02439155e9 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -11,7 +11,7 @@ #include #include -#include +#include #define DW_DMA_MAX_NR_CHANNELS 8 #define DW_DMA_MAX_NR_REQUESTS 16 @@ -161,7 +161,7 @@ struct dw_dma_regs { #define DWC_CTLH_DONE 0x00001000 #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff -/* Bitfields in CFG_LO. Platform-configurable bits are in */ +/* Bitfields in CFG_LO. Platform-configurable bits are in */ #define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ #define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ @@ -172,7 +172,7 @@ struct dw_dma_regs { #define DWC_CFGL_RELOAD_SAR (1 << 30) #define DWC_CFGL_RELOAD_DAR (1 << 31) -/* Bitfields in CFG_HI. Platform-configurable bits are in */ +/* Bitfields in CFG_HI. 
Platform-configurable bits are in */ #define DWC_CFGH_DS_UPD_EN (1 << 5) #define DWC_CFGH_SS_UPD_EN (1 << 6) diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h deleted file mode 100644 index 68b4024184de..000000000000 --- a/include/linux/dw_dmac.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Driver for the Synopsys DesignWare DMA Controller - * - * Copyright (C) 2007 Atmel Corporation - * Copyright (C) 2010-2011 ST Microelectronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef DW_DMAC_H -#define DW_DMAC_H - -#include - -/** - * struct dw_dma_slave - Controller-specific information about a slave - * - * @dma_dev: required DMA master device. Depricated. - * @bus_id: name of this device channel, not just a device name since - * devices may have more than one channel e.g. "foo_tx" - * @cfg_hi: Platform-specific initializer for the CFG_HI register - * @cfg_lo: Platform-specific initializer for the CFG_LO register - * @src_master: src master for transfers on allocated channel. - * @dst_master: dest master for transfers on allocated channel. - */ -struct dw_dma_slave { - struct device *dma_dev; - u32 cfg_hi; - u32 cfg_lo; - u8 src_master; - u8 dst_master; -}; - -/** - * struct dw_dma_platform_data - Controller configuration parameters - * @nr_channels: Number of channels supported by hardware (max 8) - * @is_private: The device channels should be marked as private and not for - * by the general purpose DMA channel allocator. - * @chan_allocation_order: Allocate channels starting from 0 or 7 - * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. - * @block_size: Maximum block size supported by the controller - * @nr_masters: Number of AHB masters supported by the controller - * @data_width: Maximum data width supported by hardware per AHB master - * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) - */ -struct dw_dma_platform_data { - unsigned int nr_channels; - bool is_private; -#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ -#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ - unsigned char chan_allocation_order; -#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ -#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ - unsigned char chan_priority; - unsigned short block_size; - unsigned char nr_masters; - unsigned char data_width[4]; -}; - -/* bursts size */ -enum dw_dma_msize { - DW_DMA_MSIZE_1, - DW_DMA_MSIZE_4, - DW_DMA_MSIZE_8, - DW_DMA_MSIZE_16, - DW_DMA_MSIZE_32, - DW_DMA_MSIZE_64, - DW_DMA_MSIZE_128, - DW_DMA_MSIZE_256, -}; - -/* Platform-configurable bits in CFG_HI */ -#define DWC_CFGH_FCMODE (1 << 0) -#define DWC_CFGH_FIFO_MODE (1 << 1) -#define DWC_CFGH_PROTCTL(x) ((x) << 2) -#define DWC_CFGH_SRC_PER(x) ((x) << 7) -#define DWC_CFGH_DST_PER(x) ((x) << 11) - -/* Platform-configurable bits in CFG_LO */ -#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ -#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) -#define DWC_CFGL_LOCK_CH_XACT (2 << 12) -#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ -#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) -#define DWC_CFGL_LOCK_BUS_XACT (2 << 14) -#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ -#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ -#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ -#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ - -/* DMA API extensions */ -struct 
dw_cyclic_desc { - struct dw_desc **desc; - unsigned long periods; - void (*period_callback)(void *param); - void *period_callback_param; -}; - -struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, - dma_addr_t buf_addr, size_t buf_len, size_t period_len, - enum dma_transfer_direction direction); -void dw_dma_cyclic_free(struct dma_chan *chan); -int dw_dma_cyclic_start(struct dma_chan *chan); -void dw_dma_cyclic_stop(struct dma_chan *chan); - -dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); - -dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); - -#endif /* DW_DMAC_H */ diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h new file mode 100644 index 000000000000..68b4024184de --- /dev/null +++ b/include/linux/platform_data/dma-dw.h @@ -0,0 +1,111 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef DW_DMAC_H +#define DW_DMAC_H + +#include + +/** + * struct dw_dma_slave - Controller-specific information about a slave + * + * @dma_dev: required DMA master device. Depricated. + * @bus_id: name of this device channel, not just a device name since + * devices may have more than one channel e.g. "foo_tx" + * @cfg_hi: Platform-specific initializer for the CFG_HI register + * @cfg_lo: Platform-specific initializer for the CFG_LO register + * @src_master: src master for transfers on allocated channel. + * @dst_master: dest master for transfers on allocated channel. + */ +struct dw_dma_slave { + struct device *dma_dev; + u32 cfg_hi; + u32 cfg_lo; + u8 src_master; + u8 dst_master; +}; + +/** + * struct dw_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + * @is_private: The device channels should be marked as private and not for + * by the general purpose DMA channel allocator. + * @chan_allocation_order: Allocate channels starting from 0 or 7 + * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. 
+ * @block_size: Maximum block size supported by the controller + * @nr_masters: Number of AHB masters supported by the controller + * @data_width: Maximum data width supported by hardware per AHB master + * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) + */ +struct dw_dma_platform_data { + unsigned int nr_channels; + bool is_private; +#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ +#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ + unsigned char chan_allocation_order; +#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ +#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ + unsigned char chan_priority; + unsigned short block_size; + unsigned char nr_masters; + unsigned char data_width[4]; +}; + +/* bursts size */ +enum dw_dma_msize { + DW_DMA_MSIZE_1, + DW_DMA_MSIZE_4, + DW_DMA_MSIZE_8, + DW_DMA_MSIZE_16, + DW_DMA_MSIZE_32, + DW_DMA_MSIZE_64, + DW_DMA_MSIZE_128, + DW_DMA_MSIZE_256, +}; + +/* Platform-configurable bits in CFG_HI */ +#define DWC_CFGH_FCMODE (1 << 0) +#define DWC_CFGH_FIFO_MODE (1 << 1) +#define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_SRC_PER(x) ((x) << 7) +#define DWC_CFGH_DST_PER(x) ((x) << 11) + +/* Platform-configurable bits in CFG_LO */ +#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ +#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) +#define DWC_CFGL_LOCK_CH_XACT (2 << 12) +#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ +#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) +#define DWC_CFGL_LOCK_BUS_XACT (2 << 14) +#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ +#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ +#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ +#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ + +/* DMA API extensions */ +struct dw_cyclic_desc { + struct dw_desc **desc; + unsigned long periods; + void (*period_callback)(void *param); + void *period_callback_param; +}; + +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_transfer_direction direction); +void dw_dma_cyclic_free(struct dma_chan *chan); +int dw_dma_cyclic_start(struct dma_chan *chan); +void dw_dma_cyclic_stop(struct dma_chan *chan); + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); + +#endif /* DW_DMAC_H */ diff --git a/include/sound/atmel-abdac.h b/include/sound/atmel-abdac.h index edff6a8ba1b5..a8f735d677fa 100644 --- a/include/sound/atmel-abdac.h +++ b/include/sound/atmel-abdac.h @@ -10,7 +10,7 @@ #ifndef __INCLUDE_SOUND_ATMEL_ABDAC_H #define __INCLUDE_SOUND_ATMEL_ABDAC_H -#include +#include /** * struct atmel_abdac_pdata - board specific ABDAC configuration diff --git a/include/sound/atmel-ac97c.h b/include/sound/atmel-ac97c.h index 00e6c289a936..f2a1cdc37661 100644 --- a/include/sound/atmel-ac97c.h +++ b/include/sound/atmel-ac97c.h @@ -10,7 +10,7 @@ #ifndef __INCLUDE_SOUND_ATMEL_AC97C_H #define __INCLUDE_SOUND_ATMEL_AC97C_H -#include +#include #define AC97C_CAPTURE 0x01 #define AC97C_PLAYBACK 0x02 diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index edf2ca72d518..154a7c44e38d 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c index a04d23174dc2..1dfb35afef8f 100644 --- a/sound/atmel/ac97c.c +++ b/sound/atmel/ac97c.c @@ -31,7 +31,7 @@ #include #include -#include +#include #include -- cgit 
v1.2.3-59-g8ed1b From 6970c34cea87ad54aab84e743970b84b1fdf1c7d Mon Sep 17 00:00:00 2001 From: Jukka Rissanen Date: Mon, 15 Sep 2014 11:03:36 +0300 Subject: MAINTAINERS: add maintainer for generic 6LoWPAN Add Jukka to 6LoWPAN maintainer list. He will concentrate on generic and bluetooth part of 6LoWPAN stack. Signed-off-by: Jukka Rissanen Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0b7007a9c8a4..94ad1bab633a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -152,6 +152,7 @@ F: drivers/scsi/53c700* 6LOWPAN GENERIC (BTLE/IEEE 802.15.4) M: Alexander Aring +M: Jukka Rissanen L: linux-bluetooth@vger.kernel.org L: linux-wpan@vger.kernel.org S: Maintained -- cgit v1.2.3-59-g8ed1b From e076e96227ef6b5b66cfdd79e75401bd2f6d532f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 4 Mar 2014 18:14:58 -0800 Subject: MAINTAINERS: add entry for the Broadcom BCM63xx ARM SoCs Add a MAINTAINERS entry covering all the Broadcom BCM63xx ARM DSL SoCs files along with the relevant git tree and mailing-list. Acked-by: Arnd Bergmann Signed-off-by: Florian Fainelli --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..11d291550fb3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2021,6 +2021,14 @@ F: arch/arm/mach-bcm/bcm_5301x.c F: arch/arm/boot/dts/bcm5301x.dtsi F: arch/arm/boot/dts/bcm470* +BROADCOM BCM63XX ARM ARCHITECTURE +M: Florian Fainelli +L: linux-arm-kernel@lists.infradead.org +T: git git://git.github.com/brcm/linux.git +S: Maintained +F: arch/arm/mach-bcm/bcm63xx.c +F: arch/arm/include/debug/bcm63xx.S + BROADCOM BCM7XXX ARM ARCHITECTURE M: Marc Carino M: Brian Norris -- cgit v1.2.3-59-g8ed1b From ab95eac99c1714ede92800a9c33f4c96ce8d558c Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 15 Sep 2014 21:56:17 -0700 Subject: MAINTAINERS: add l2-mtd.git, 'next' tree for MTD We've been semi-officially queueing patches here for a while, and it's in linux-next, so let's advertise it in MAINTAINERS. Signed-off-by: Brian Norris Acked-by: Artem Bityutskiy --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 809ecd680d88..55a61f242991 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5898,6 +5898,7 @@ L: linux-mtd@lists.infradead.org W: http://www.linux-mtd.infradead.org/ Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ T: git git://git.infradead.org/linux-mtd.git +T: git git://git.infradead.org/l2-mtd.git S: Maintained F: drivers/mtd/ F: include/linux/mtd/ -- cgit v1.2.3-59-g8ed1b From 18f340f90e087c078c634d5c4fed5e0d632d4fb6 Mon Sep 17 00:00:00 2001 From: Paul Zimmerman Date: Fri, 19 Sep 2014 14:49:36 -0700 Subject: usb: dwc2: add T: line to MAINTAINERS showing Felipe's tree Starting with v3.18-rc, patches for dwc2 will go through Felipe's tree. Add a T: line to MAINTAINERS to document this. 
Signed-off-by: Paul Zimmerman Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3ca017a3ddea..294eef99277c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2849,6 +2849,7 @@ F: drivers/platform/x86/dell-wmi.c DESIGNWARE USB2 DRD IP DRIVER M: Paul Zimmerman L: linux-usb@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained F: drivers/usb/dwc2/ -- cgit v1.2.3-59-g8ed1b From 249c697e5e2c8e1347d79be0a9c93a985f2ad12e Mon Sep 17 00:00:00 2001 From: Antti Palosaari Date: Mon, 1 Sep 2014 19:44:59 -0300 Subject: [media] MAINTAINERS: IT913X driver filenames I removed tuner_ prefix from the driver file names. Update maintainers entry according to that. Signed-off-by: Antti Palosaari Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index fbc1af4fc870..6eb60a4e7339 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5066,7 +5066,7 @@ W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained -F: drivers/media/tuners/tuner_it913x* +F: drivers/media/tuners/it913x* IVTV VIDEO4LINUX DRIVER M: Andy Walls -- cgit v1.2.3-59-g8ed1b From e5ab1477bc4d213c602cb7427b6594db35d5c4c4 Mon Sep 17 00:00:00 2001 From: Antti Palosaari Date: Wed, 10 Sep 2014 04:20:15 -0300 Subject: [media] MAINTAINERS: add HackRF SDR driver HackRF SDR driver. Video4Linux USB device. Signed-off-by: Antti Palosaari Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 6eb60a4e7339..5958dd2bb7cc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4172,6 +4172,16 @@ L: linuxppc-dev@lists.ozlabs.org S: Odd Fixes F: drivers/tty/hvc/ +HACKRF MEDIA DRIVER +M: Antti Palosaari +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/media/usb/hackrf/ + HARDWARE MONITORING M: Jean Delvare M: Guenter Roeck -- cgit v1.2.3-59-g8ed1b From 93c090b36ae977bc719f7ea2fcbf4a2e8e92e439 Mon Sep 17 00:00:00 2001 From: Kamil Debski Date: Wed, 20 Aug 2014 11:29:23 +0200 Subject: MAINTAINERS: add entry for the PWM fan driver Signed-off-by: Kamil Debski Signed-off-by: Guenter Roeck --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 809ecd680d88..3ac835a79ae6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7284,6 +7284,14 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/usb/pwc/* +PWM FAN DRIVER +M: Kamil Debski +L: lm-sensors@lm-sensors.org +S: Supported +F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt +F: Documentation/hwmon/pwm-fan +F: drivers/hwmon/pwm-fan.c + PWM SUBSYSTEM M: Thierry Reding L: linux-pwm@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From c9b1a5b5c24d3249f5b618b900d64a6a76f8dd23 Mon Sep 17 00:00:00 2001 From: Harish Patil Date: Thu, 18 Sep 2014 17:27:25 -0400 Subject: Update qlge driver maintainers list Signed-off-by: Harish Patil Signed-off-by: David S. 
Miller --- MAINTAINERS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 5e3709ebc234..b4e23acc6441 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7385,9 +7385,9 @@ S: Supported F: drivers/net/ethernet/qlogic/qlcnic/ QLOGIC QLGE 10Gb ETHERNET DRIVER -M: Shahed Shaikh -M: Jitendra Kalsaria -M: Ron Mercer +M: Harish Patil +M: Sudarsana Kalluru +M: Dept-GELinuxNICDev@qlogic.com M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported -- cgit v1.2.3-59-g8ed1b From 990a6a997774fae9667f08805ea6c7fe25381b84 Mon Sep 17 00:00:00 2001 From: Olli Salonen Date: Fri, 22 Aug 2014 13:50:42 -0300 Subject: [media] MAINTAINERS: add sp2 entry Add a maintainer for the new CIMaX SP2 driver. Signed-off-by: Olli Salonen Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 5958dd2bb7cc..e80a2755b3e8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8594,6 +8594,14 @@ F: include/sound/dmaengine_pcm.h F: sound/core/pcm_dmaengine.c F: sound/soc/soc-generic-dmaengine-pcm.c +SP2 MEDIA DRIVER +M: Olli Salonen +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +S: Maintained +F: drivers/media/dvb-frontends/sp2* + SPARC + UltraSPARC (sparc/sparc64) M: "David S. Miller" L: sparclinux@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 6e68e6c5e4b67a02aaa406da6124ea5cae7d5e10 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 22 Sep 2014 23:42:50 +0000 Subject: ftracetest: Initial commit for ftracetest ftracetest is a collection of testcase shell-scripts for ftrace. To avoid regressions of ftrace, these testcases check correct ftrace behaviors. If someone would like to add any features on ftrace, the patch series should have at least one testcase for checking the new behavior. 
Link: http://lkml.kernel.org/p/20140922234250.23415.68758.stgit@kbuild-f20.novalocal Acked-by: Shuah Khan Acked-by: Namhyung Kim Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt --- MAINTAINERS | 1 + tools/testing/selftests/Makefile | 1 + tools/testing/selftests/ftrace/Makefile | 7 ++ tools/testing/selftests/ftrace/README | 45 +++++++ tools/testing/selftests/ftrace/ftracetest | 159 +++++++++++++++++++++++++ tools/testing/selftests/ftrace/test.d/template | 4 + 6 files changed, 217 insertions(+) create mode 100644 tools/testing/selftests/ftrace/Makefile create mode 100644 tools/testing/selftests/ftrace/README create mode 100755 tools/testing/selftests/ftrace/ftracetest create mode 100644 tools/testing/selftests/ftrace/test.d/template (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 670b3dcce2de..bb455d0ae12a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9310,6 +9310,7 @@ F: include/*/ftrace.h F: include/linux/trace*.h F: include/trace/ F: kernel/trace/ +F: tools/testing/selftests/ftrace/ TRIVIAL PATCHES M: Jiri Kosina diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 36ff2e4c7b6f..45f145c6f843 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -14,6 +14,7 @@ TARGETS += powerpc TARGETS += user TARGETS += sysctl TARGETS += firmware +TARGETS += ftrace TARGETS_HOTPLUG = cpu-hotplug TARGETS_HOTPLUG += memory-hotplug diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile new file mode 100644 index 000000000000..76cc9f156267 --- /dev/null +++ b/tools/testing/selftests/ftrace/Makefile @@ -0,0 +1,7 @@ +all: + +run_tests: + @/bin/sh ./ftracetest || echo "ftrace selftests: [FAIL]" + +clean: + rm -rf logs/* diff --git a/tools/testing/selftests/ftrace/README b/tools/testing/selftests/ftrace/README new file mode 100644 index 000000000000..b8631f03e754 --- /dev/null +++ b/tools/testing/selftests/ftrace/README @@ -0,0 +1,45 @@ +Linux Ftrace Testcases + +This is a collection of testcases for ftrace tracing feature in the Linux +kernel. Since ftrace exports interfaces via the debugfs, we just need +shell scripts for testing. Feel free to add new test cases. + +Running the ftrace testcases +============================ + +At first, you need to be the root user to run this script. +To run all testcases: + + $ sudo ./ftracetest + +To run specific testcases: + + # ./ftracetest test.d/basic3.tc + +Or you can also run testcases under given directory: + + # ./ftracetest test.d/kprobe/ + +Contributing new testcases +========================== + +Copy test.d/template to your testcase (whose filename must have *.tc +extension) and rewrite the test description line. + + * The working directory of the script is /tracing/. + + * Take care with side effects as the tests are run with root privilege. + + * The tests should not run for a long period of time (more than 1 min.) + These are to be unit tests. + + * You can add a directory for your testcases under test.d/ if needed. + + * The test cases should run on dash (busybox shell) for testing on + minimal cross-build environments. 
+ +TODO +==== + + * Fancy colored output :) + diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest new file mode 100755 index 000000000000..4c6c2fad2cda --- /dev/null +++ b/tools/testing/selftests/ftrace/ftracetest @@ -0,0 +1,159 @@ +#!/bin/sh + +# ftracetest - Ftrace test shell scripts +# +# Copyright (C) Hitachi Ltd., 2014 +# Written by Masami Hiramatsu +# +# Released under the terms of the GPL v2. + +usage() { # errno [message] +[ "$2" ] && echo $2 +echo "Usage: ftracetest [options] [testcase(s)] [testcase-directory(s)]" +echo " Options:" +echo " -h|--help Show help message" +echo " -k|--keep Keep passed test logs" +echo " -d|--debug Debug mode (trace all shell commands)" +exit $1 +} + +errexit() { # message + echo "Error: $1" 1>&2 + exit 1 +} + +# Ensuring user privilege +if [ `id -u` -ne 0 ]; then + errexit "this must be run by root user" +fi + +# Utilities +absdir() { # file_path + (cd `dirname $1`; pwd) +} + +abspath() { + echo `absdir $1`/`basename $1` +} + +find_testcases() { #directory + echo `find $1 -name \*.tc` +} + +parse_opts() { # opts + local OPT_TEST_CASES= + local OPT_TEST_DIR= + + while [ "$1" ]; do + case "$1" in + --help|-h) + usage 0 + ;; + --keep|-k) + KEEP_LOG=1 + shift 1 + ;; + --debug|-d) + DEBUG=1 + shift 1 + ;; + *.tc) + if [ -f "$1" ]; then + OPT_TEST_CASES="$OPT_TEST_CASES `abspath $1`" + shift 1 + else + usage 1 "$1 is not a testcase" + fi + ;; + *) + if [ -d "$1" ]; then + OPT_TEST_DIR=`abspath $1` + OPT_TEST_CASES="$OPT_TEST_CASES `find_testcases $OPT_TEST_DIR`" + shift 1 + else + usage 1 "Invalid option ($1)" + fi + ;; + esac + done + if [ "$OPT_TEST_CASES" ]; then + TEST_CASES=$OPT_TEST_CASES + fi +} + +# Parameters +DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' '` +TRACING_DIR=$DEBUGFS_DIR/tracing +TOP_DIR=`absdir $0` +TEST_DIR=$TOP_DIR/test.d +TEST_CASES=`find_testcases $TEST_DIR` +LOG_DIR=$TOP_DIR/logs/`date +%Y%m%d-%H%M%S`/ +KEEP_LOG=0 +DEBUG=0 +# Parse command-line options +parse_opts $* + +[ $DEBUG -ne 0 ] && set -x + +# Verify parameters +if [ -z "$DEBUGFS_DIR" -o ! -d "$TRACING_DIR" ]; then + errexit "No ftrace directory found" +fi + +# Preparing logs +LOG_FILE=$LOG_DIR/ftracetest.log +mkdir -p $LOG_DIR || errexit "Failed to make a log directory: $LOG_DIR" +date > $LOG_FILE +prlog() { # messages + echo "$@" | tee -a $LOG_FILE +} +catlog() { #file + cat $1 | tee -a $LOG_FILE +} +prlog "=== Ftrace unit tests ===" + + +# Testcase management +PASSED_CASES= +FAILED_CASES= +CASENO=0 +testcase() { # testfile + CASENO=$((CASENO+1)) + prlog -n "[$CASENO]"`grep "^#[ \t]*description:" $1 | cut -f2 -d:` +} +failed() { + prlog " [FAIL]" + FAILED_CASES="$FAILED_CASES $CASENO" +} +passed() { + prlog " [PASS]" + PASSED_CASES="$PASSED_CASES $CASENO" +} + + +# Run one test case +run_test() { # testfile + local testname=`basename $1` + local testlog=`mktemp --tmpdir=$LOG_DIR ${testname}-XXXXXX.log` + testcase $1 + echo "execute: "$1 > $testlog + (cd $TRACING_DIR; set -x ; . $1) >> $testlog 2>&1 + ret=$? 
+ if [ $ret -ne 0 ]; then + failed + catlog $testlog + else + passed + [ $KEEP_LOG -eq 0 ] && rm $testlog + fi +} + +# Main loop +for t in $TEST_CASES; do + run_test $t +done +prlog "" +prlog "# of passed: " `echo $PASSED_CASES | wc -w` +prlog "# of failed: " `echo $FAILED_CASES | wc -w` + +test -z "$FAILED_CASES" # if no error, return 0 diff --git a/tools/testing/selftests/ftrace/test.d/template b/tools/testing/selftests/ftrace/test.d/template new file mode 100644 index 000000000000..ce5f735b2e65 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/template @@ -0,0 +1,4 @@ +#!/bin/sh +# description: %HERE DESCRIBE WHAT THIS DOES% +# you have to add ".tc" extention for your testcase file +exit 0 # Return 0 if the test is passed, otherwise return !0 -- cgit v1.2.3-59-g8ed1b From 15d036094b75e63a3817ec4f836959d38cecb1e6 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 28 Aug 2014 06:44:13 +0200 Subject: MAINTAINERS: Add xen pvscsi maintainer Add myself as maintainer for the Xen pvSCSI drivers. Signed-off-by: Juergen Gross Acked-by: Konrad Rzeszutek Wilk Signed-off-by: David Vrabel --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 670b3dcce2de..52a5fb69dbf5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10156,6 +10156,15 @@ S: Supported F: drivers/block/xen-blkback/* F: drivers/block/xen* +XEN PVSCSI DRIVERS +M: Juergen Gross +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) +L: linux-scsi@vger.kernel.org +S: Supported +F: drivers/scsi/xen-scsifront.c +F: drivers/xen/xen-scsiback.c +F: include/xen/interface/io/vscsiif.h + XEN SWIOTLB SUBSYSTEM M: Konrad Rzeszutek Wilk L: xen-devel@lists.xenproject.org (moderated for non-subscribers) -- cgit v1.2.3-59-g8ed1b From 62f6f0863e5b304284bcf9b80e12ec1bd4f01c9a Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Mon, 8 Sep 2014 13:04:48 +0100 Subject: MAINTAINERS: Add ehci-st.c and ohci-st.c to ARCH/STI architecture This patch adds the ehci-st.c and ohci-st.c files for the usb 2.0 & usb1.1 host controller drivers found on stih41x and stih4xx STMicroelectronics SoC's into the STI arch section of the maintainers file. Signed-off-by: Peter Griffin Reviewed-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a41ffb508dda..51ecce8b785a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1399,6 +1399,8 @@ F: drivers/i2c/busses/i2c-st.c F: drivers/tty/serial/st-asc.c F: drivers/mmc/host/sdhci-st.c F: drivers/usb/dwc3/dwc3-st.c +F: drivers/usb/host/ehci-st.c +F: drivers/usb/host/ohci-st.c ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT M: Lennert Buytenhek -- cgit v1.2.3-59-g8ed1b From 833c95456a70826d1384883b73fd23aff24d366f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 12 Sep 2014 09:01:56 +0200 Subject: device coredump: add new device coredump class Many devices run firmware and/or complex hardware, and most of that can have bugs. When it misbehaves, however, it is often much harder to debug than software running on the host. Introduce a "device coredump" mechanism to allow dumping internal device/firmware state through a generalized mechanism. As devices are different and information needed can vary accordingly, this doesn't prescribe a file format - it just provides mechanism to get data to be able to capture it in a generalized way (e.g. in distributions.) 
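For illustration only, not part of the original changelog: a driver using the vmalloc-based helper added by this patch could report a firmware crash roughly as sketched below. example_report_crash() is an invented placeholder name and the 16 KiB size is arbitrary; the buffer is handed over to the subsystem, which frees it after retrieval or timeout as described below.

  #include <linux/device.h>
  #include <linux/gfp.h>
  #include <linux/string.h>
  #include <linux/vmalloc.h>
  #include <linux/devcoredump.h>

  static void example_report_crash(struct device *dev)
  {
          size_t len = 16 * 1024;          /* arbitrary example size */
          void *dump = vmalloc(len);

          if (!dump)
                  return;
          /* a real driver would copy its device/firmware state here */
          memset(dump, 0, len);
          /* ownership of "dump" passes to the devcoredump core */
          dev_coredumpv(dev, dump, len, GFP_KERNEL);
  }
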
The dumped data will be readable in sysfs in the virtual device's data file under /sys/class/devcoredump/devcd*/. Writing to it will free the data and remove the device, as does a 5-minute timeout. Note that generalized capturing of such data may result in privacy issues, so users generally need to be involved. In order to allow certain users/system integrators/... to disable the feature at all, introduce a Kconfig option to override the drivers that would like to have the feature. For now, this provides two ways of dumping data: 1) with a vmalloc'ed area, that is then given to the subsystem and freed after retrieval or timeout 2) with a generalized reader/free function method We could/should add more options, e.g. a list of pages, since the vmalloc area is very limited on some architectures. Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 7 ++ drivers/base/Kconfig | 21 ++++ drivers/base/Makefile | 1 + drivers/base/devcoredump.c | 265 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/devcoredump.h | 35 ++++++ 5 files changed, 329 insertions(+) create mode 100644 drivers/base/devcoredump.c create mode 100644 include/linux/devcoredump.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 809ecd680d88..d810a6fa8276 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2859,6 +2859,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained F: drivers/usb/dwc3/ +DEVICE COREDUMP (DEV_COREDUMP) +M: Johannes Berg +L: linux-kernel@vger.kernel.org +S: Maintained +F: drivers/base/devcoredump.c +F: include/linux/devcoredump.h + DEVICE FREQUENCY (DEVFREQ) M: MyungJoo Ham M: Kyungmin Park diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 4e7f0ff83ae7..134f763d90fd 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -165,6 +165,27 @@ config FW_LOADER_USER_HELPER_FALLBACK If you are unsure about this, say N here. +config WANT_DEV_COREDUMP + bool + help + Drivers should "select" this option if they desire to use the + device coredump mechanism. + +config DISABLE_DEV_COREDUMP + bool "Disable device coredump" if EXPERT + help + Disable the device coredump mechanism despite drivers wanting to + use it; this allows for more sensitive systems or systems that + don't want to ever access the information to not have the code, + nor keep any data. + + If unsure, say N. + +config DEV_COREDUMP + bool + default y if WANT_DEV_COREDUMP + depends on !DISABLE_DEV_COREDUMP + config DEBUG_DRIVER bool "Driver Core verbose debug messages" depends on DEBUG_KERNEL diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 4aab26ec0292..6922cd6850a2 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o obj-$(CONFIG_REGMAP) += regmap/ obj-$(CONFIG_SOC_BUS) += soc.o obj-$(CONFIG_PINCTRL) += pinctrl.o +obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c new file mode 100644 index 000000000000..96614b04544c --- /dev/null +++ b/drivers/base/devcoredump.c @@ -0,0 +1,265 @@ +/* + * This file is provided under the GPLv2 license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * Author: Johannes Berg + */ +#include +#include +#include +#include +#include +#include +#include + +/* if data isn't read by userspace after 5 minutes then delete it */ +#define DEVCD_TIMEOUT (HZ * 60 * 5) + +struct devcd_entry { + struct device devcd_dev; + const void *data; + size_t datalen; + struct module *owner; + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen); + void (*free)(const void *data); + struct delayed_work del_wk; + struct device *failing_dev; +}; + +static struct devcd_entry *dev_to_devcd(struct device *dev) +{ + return container_of(dev, struct devcd_entry, devcd_dev); +} + +static void devcd_dev_release(struct device *dev) +{ + struct devcd_entry *devcd = dev_to_devcd(dev); + + devcd->free(devcd->data); + module_put(devcd->owner); + + /* + * this seems racy, but I don't see a notifier or such on + * a struct device to know when it goes away? + */ + if (devcd->failing_dev->kobj.sd) + sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj, + "devcoredump"); + + put_device(devcd->failing_dev); + kfree(devcd); +} + +static void devcd_del(struct work_struct *wk) +{ + struct devcd_entry *devcd; + + devcd = container_of(wk, struct devcd_entry, del_wk.work); + + device_del(&devcd->devcd_dev); + put_device(&devcd->devcd_dev); +} + +static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct devcd_entry *devcd = dev_to_devcd(dev); + + return devcd->read(buffer, offset, count, devcd->data, devcd->datalen); +} + +static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct devcd_entry *devcd = dev_to_devcd(dev); + + mod_delayed_work(system_wq, &devcd->del_wk, 0); + + return count; +} + +static struct bin_attribute devcd_attr_data = { + .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, }, + .size = 0, + .read = devcd_data_read, + .write = devcd_data_write, +}; + +static struct bin_attribute *devcd_dev_bin_attrs[] = { + &devcd_attr_data, NULL, +}; + +static const struct attribute_group devcd_dev_group = { + .bin_attrs = devcd_dev_bin_attrs, +}; + +static const struct attribute_group *devcd_dev_groups[] = { + &devcd_dev_group, NULL, +}; + +static struct class devcd_class = { + .name = "devcoredump", + .owner = THIS_MODULE, + .dev_release = devcd_dev_release, + .dev_groups = devcd_dev_groups, +}; + +static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen) +{ + if (offset > datalen) + return -EINVAL; + + if (offset + count > datalen) + count = datalen - offset; + + if (count) + memcpy(buffer, ((u8 *)data) + offset, count); + + return count; +} + +/** + * dev_coredumpv - create device coredump with vmalloc data + * @dev: the struct device for the crashed device + * @data: vmalloc data containing the device coredump 
+ * @datalen: length of the data + * @gfp: allocation flags + * + * This function takes ownership of the vmalloc'ed data and will free + * it when it is no longer used. See dev_coredumpm() for more information. + */ +void dev_coredumpv(struct device *dev, const void *data, size_t datalen, + gfp_t gfp) +{ + dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, vfree); +} +EXPORT_SYMBOL_GPL(dev_coredumpv); + +static int devcd_match_failing(struct device *dev, const void *failing) +{ + struct devcd_entry *devcd = dev_to_devcd(dev); + + return devcd->failing_dev == failing; +} + +/** + * dev_coredumpm - create device coredump with read/free methods + * @dev: the struct device for the crashed device + * @owner: the module that contains the read/free functions, use %THIS_MODULE + * @data: data cookie for the @read/@free functions + * @datalen: length of the data + * @gfp: allocation flags + * @read: function to read from the given buffer + * @free: function to free the given buffer + * + * Creates a new device coredump for the given device. If a previous one hasn't + * been read yet, the new coredump is discarded. The data lifetime is determined + * by the device coredump framework and when it is no longer needed the @free + * function will be called to free the data. + */ +void dev_coredumpm(struct device *dev, struct module *owner, + const void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen), + void (*free)(const void *data)) +{ + static atomic_t devcd_count = ATOMIC_INIT(0); + struct devcd_entry *devcd; + struct device *existing; + + existing = class_find_device(&devcd_class, NULL, dev, + devcd_match_failing); + if (existing) { + put_device(existing); + goto free; + } + + if (!try_module_get(owner)) + goto free; + + devcd = kzalloc(sizeof(*devcd), gfp); + if (!devcd) + goto put_module; + + devcd->owner = owner; + devcd->data = data; + devcd->datalen = datalen; + devcd->read = read; + devcd->free = free; + devcd->failing_dev = get_device(dev); + + device_initialize(&devcd->devcd_dev); + + dev_set_name(&devcd->devcd_dev, "devcd%d", + atomic_inc_return(&devcd_count)); + devcd->devcd_dev.class = &devcd_class; + + if (device_add(&devcd->devcd_dev)) + goto put_device; + + if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj, + "failing_device")) + /* nothing - symlink will be missing */; + + if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj, + "devcoredump")) + /* nothing - symlink will be missing */; + + INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); + schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); + + return; + put_device: + put_device(&devcd->devcd_dev); + put_module: + module_put(owner); + free: + free(data); +} +EXPORT_SYMBOL_GPL(dev_coredumpm); + +static int __init devcoredump_init(void) +{ + return class_register(&devcd_class); +} +__initcall(devcoredump_init); + +static int devcd_free(struct device *dev, void *data) +{ + struct devcd_entry *devcd = dev_to_devcd(dev); + + flush_delayed_work(&devcd->del_wk); + return 0; +} + +static void __exit devcoredump_exit(void) +{ + class_for_each_device(&devcd_class, NULL, NULL, devcd_free); + class_unregister(&devcd_class); +} +__exitcall(devcoredump_exit); diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h new file mode 100644 index 000000000000..c0a360e99f64 --- /dev/null +++ b/include/linux/devcoredump.h @@ -0,0 +1,35 @@ +#ifndef __DEVCOREDUMP_H +#define __DEVCOREDUMP_H + +#include +#include +#include + +#ifdef 
CONFIG_DEV_COREDUMP +void dev_coredumpv(struct device *dev, const void *data, size_t datalen, + gfp_t gfp); + +void dev_coredumpm(struct device *dev, struct module *owner, + const void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen), + void (*free)(const void *data)); +#else +static inline void dev_coredumpv(struct device *dev, const void *data, + size_t datalen, gfp_t gfp) +{ + vfree(data); +} + +static inline void +dev_coredumpm(struct device *dev, struct module *owner, + const void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen), + void (*free)(const void *data)) +{ + free(data); +} +#endif /* CONFIG_DEV_COREDUMP */ + +#endif /* __DEVCOREDUMP_H */ -- cgit v1.2.3-59-g8ed1b From 74316949a2f26b75e66cd1c8c8bad4cde1841645 Mon Sep 17 00:00:00 2001 From: Eli Billauer Date: Tue, 9 Sep 2014 09:38:01 +0300 Subject: MAINTAINERS: Move Xillybus out of staging Signed-off-by: Eli Billauer Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 5663713db076..c49cfd011d2f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8804,12 +8804,6 @@ M: Arnaud Patard S: Odd Fixes F: drivers/staging/xgifb/ -STAGING - XILLYBUS DRIVER -M: Eli Billauer -L: devel@driverdev.osuosl.org -S: Supported -F: drivers/staging/xillybus/ - STARFIRE/DURALAN NETWORK DRIVER M: Ion Badulescu S: Odd Fixes @@ -10218,6 +10212,12 @@ L: linux-serial@vger.kernel.org S: Maintained F: drivers/tty/serial/uartlite.c +XILLYBUS DRIVER +M: Eli Billauer +L: linux-kernel@vger.kernel.org +S: Supported +F: drivers/char/xillybus/ + XTENSA XTFPGA PLATFORM SUPPORT M: Max Filippov L: linux-xtensa@linux-xtensa.org -- cgit v1.2.3-59-g8ed1b From dfa5d19658a308b373ce0cb9f6be9338c16ce14f Mon Sep 17 00:00:00 2001 From: Balaji T K Date: Wed, 17 Sep 2014 22:50:11 +0530 Subject: MAINTAINERS: omap_hsmmc: remove myself from MAINTAINERS As I won't be able to maintain omap_hsmmc driver Signed-off-by: Balaji T K Signed-off-by: Ulf Hansson --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 5e7866a486b0..b296e43695b0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6540,10 +6540,9 @@ S: Maintained F: drivers/mmc/host/omap.c OMAP HS MMC SUPPORT -M: Balaji T K L: linux-mmc@vger.kernel.org L: linux-omap@vger.kernel.org -S: Maintained +S: Orphan F: drivers/mmc/host/omap_hsmmc.c OMAP RANDOM NUMBER GENERATOR SUPPORT -- cgit v1.2.3-59-g8ed1b From 6da969a5fe9768f4735480c91e4885cf9babf023 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Thu, 11 Sep 2014 18:02:46 +0100 Subject: MAINTAINERS: Add phy-stih407-usb.c file to ARCH/STI architecture This patch adds the new phy-stih407-usb.c usb phy driver found on STMicroelectronics stih407 consumer electronics SoC's into the STI arch section of the maintainers file. 
Signed-off-by: Peter Griffin Signed-off-by: Kishon Vijay Abraham I --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 670b3dcce2de..93fd8afdd55e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1398,6 +1398,7 @@ F: drivers/media/rc/st_rc.c F: drivers/i2c/busses/i2c-st.c F: drivers/tty/serial/st-asc.c F: drivers/mmc/host/sdhci-st.c +F: drivers/phy/phy-stih407-usb.c ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT M: Lennert Buytenhek -- cgit v1.2.3-59-g8ed1b From 26389c78269ada2927a4ec114bbf4df45776445d Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Mon, 8 Sep 2014 11:33:02 +0100 Subject: MAINTAINERS: Add phy-stih41x-usb.c to ARCH/STI architecture This patch adds the new phy-sti41x-usb.c PHY driver found on STMicroelectronics stih41x consumer electronics SoC's into the STI arch section of the maintainers file. Signed-off-by: Peter Griffin Signed-off-by: Kishon Vijay Abraham I --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 93fd8afdd55e..a5cdbe94862c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1399,6 +1399,7 @@ F: drivers/i2c/busses/i2c-st.c F: drivers/tty/serial/st-asc.c F: drivers/mmc/host/sdhci-st.c F: drivers/phy/phy-stih407-usb.c +F: drivers/phy/phy-stih41x-usb.c ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT M: Lennert Buytenhek -- cgit v1.2.3-59-g8ed1b From e0c524049f8279d00d2fbd4748b03234a2726fdd Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 10 Jul 2014 11:30:08 -0400 Subject: MAINTAINERS: Add Keystone Multicore Navigator drivers entry Signed-off-by: Santosh Shilimkar --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..119bc9d67512 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9086,6 +9086,15 @@ F: drivers/misc/tifm* F: drivers/mmc/host/tifm_sd.c F: include/linux/tifm.h +TI KEYSTONE MULTICORE NAVIGATOR DRIVERS +M: Santosh Shilimkar +L: linux-kernel@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: drivers/soc/ti/* +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git + + TI LM49xxx FAMILY ASoC CODEC DRIVERS M: M R Swami Reddy M: Vishwas A Deshpande -- cgit v1.2.3-59-g8ed1b From 8da5e30289c285025483f14c6923330f224d505c Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Wed, 24 Sep 2014 23:08:00 +0200 Subject: MAINTAINERS: update ARM pxa maintainers Change pxa active maintainers, and remove more busy people. Remove Eric's tree as it is not accessible anymore. 
Signed-off-by: Robert Jarzmik Acked-by: Daniel Mack Acked-by: Eric Miao Acked-by: Haojian Zhuang Acked-by: Russell King Signed-off-by: Arnd Bergmann --- MAINTAINERS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3a34ec04a00d..210e1039f6b1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7295,12 +7295,12 @@ F: drivers/video/backlight/pwm_bl.c F: include/linux/pwm_backlight.h PXA2xx/PXA3xx SUPPORT -M: Eric Miao -M: Russell King +M: Daniel Mack M: Haojian Zhuang +M: Robert Jarzmik L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://github.com/hzhuang1/linux.git -T: git git://git.linaro.org/people/ycmiao/pxa-linux.git +T: git git://github.com/rjarzmik/linux.git S: Maintained F: arch/arm/mach-pxa/ F: drivers/pcmcia/pxa2xx* -- cgit v1.2.3-59-g8ed1b From 7c1e38769fa448de02ad6b6aa4b499fff4d89842 Mon Sep 17 00:00:00 2001 From: Carlo Caione Date: Fri, 12 Sep 2014 20:18:31 +0200 Subject: MAINTAINERS: Add entry for the Amlogic MesonX SoCs I'm going to maintain the platform. Signed-off-by: Carlo Caione Signed-off-by: Arnd Bergmann --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 210e1039f6b1..63dd79d22c20 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -835,6 +835,12 @@ M: Emilio López S: Maintained F: drivers/clk/sunxi/ +ARM/Amlogic MesonX SoC support +M: Carlo Caione +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +N: meson[x68] + ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES M: Andrew Victor M: Nicolas Ferre -- cgit v1.2.3-59-g8ed1b From 20651e0b218e3684fee5e46319a1ba363c864179 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Wed, 24 Sep 2014 16:30:00 -0700 Subject: MAINTAINERS: update entry for drivers/power/avs Some more AVS-related drivers are arriving. Update MAINTAINERS to reflect that myself and Nishanth will keep an eye on the new ones as well. Signed-off-by: Kevin Hilman --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index aefa94841ff3..c3ee79417607 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8348,11 +8348,11 @@ S: Maintained F: Documentation/security/Smack.txt F: security/smack/ -SMARTREFLEX DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS) +DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS) M: Kevin Hilman M: Nishanth Menon S: Maintained -F: drivers/power/avs/smartreflex.c +F: drivers/power/avs/ F: include/linux/power/smartreflex.h L: linux-pm@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From f18cf05038729a958732fbcc16730004dc1b84dd Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 19 Sep 2014 11:17:12 -0700 Subject: MAINTAINERS: add a third maintainer to mach-bcm Add myself as a third maintainer to the mach-bcm code to increase the chances the redundancy in the merging/reviewing process. 
Signed-off-by: Florian Fainelli Acked-by: Scott Branden Acked-by: Brian Norris Acked-by: Matt Porter Signed-off-by: Arnd Bergmann --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 63dd79d22c20..7de4e6429247 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2009,6 +2009,7 @@ F: drivers/net/ethernet/broadcom/bnx2x/ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE M: Christian Daudt M: Matt Porter +M: Florian Fainelli L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/mach-bcm S: Maintained -- cgit v1.2.3-59-g8ed1b From 48b490d23ef5aaf65e16e194e0fd2578a9b6497f Mon Sep 17 00:00:00 2001 From: Andreas Werner Date: Mon, 15 Sep 2014 09:36:30 +0200 Subject: MAINTAINERS: Adds Andreas Werner to maintainers list for MEN F21BMC Added maintainer for the following MEN F21BMC drivers: - menf21bmc (MFD) - menf21bmc_wdt (Watchdog) - menf21bmc_hwmon (HWMON) - leds-menf21bmc (LED) Signed-off-by: Andreas Werner Acked-by: Guenter Roeck Acked-by: Bryan Wu Acked-by: Wim Van Sebroeck Signed-off-by: Lee Jones --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1ff06dee651d..3e56237107e9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5910,6 +5910,15 @@ S: Supported F: drivers/mcb/ F: include/linux/mcb.h +MEN F21BMC (Board Management Controller) +M: Andreas Werner +S: Supported +F: drivers/mfd/menf21bmc.c +F: drivers/watchdog/menf21bmc_wdt.c +F: drivers/leds/leds-menf21bmc.c +F: drivers/hwmon/menf21bmc_hwmon.c +F: Documentation/hwmon/menf21bmc + METAG ARCHITECTURE M: James Hogan L: linux-metag@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 53007a7bfefd98cc3dd121f3b1ceedc6dcf08cc3 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 26 Sep 2014 11:05:17 +0200 Subject: MAINTAINERS: update location of linux-doc tree Signed-off-by: Jiri Kosina --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 37054306dc9f..a9622c7e4ce1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3019,6 +3019,7 @@ F: Documentation/ X: Documentation/ABI/ X: Documentation/devicetree/ X: Documentation/[a-z][a-z]_[A-Z][A-Z]/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/doc.git DOUBLETALK DRIVER M: "James R. Van Zandt" -- cgit v1.2.3-59-g8ed1b From e54951c8585e8e950ac04b15728910cc5a64e612 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Fri, 26 Sep 2014 11:45:55 +0200 Subject: MAINTAINERS: Add maintainers entry for Mediatek SoCs I plan to stay with the Mediatek SoCs for the next future and hope to expand its support along the way with the help of a whole bunch of people. 
Signed-off-by: Matthias Brugger Signed-off-by: Arnd Bergmann --- MAINTAINERS | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 7de4e6429247..4fe9320388e7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1156,6 +1156,16 @@ W: http://www.digriz.org.uk/ts78xx/kernel S: Maintained F: arch/arm/mach-orion5x/ts78xx-* +ARM/Mediatek SoC support +M: Matthias Brugger +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/boot/dts/mt6* +F: arch/arm/boot/dts/mt8* +F: arch/arm/mach-mediatek/ +N: mtk +K: mediatek + ARM/MICREL KS8695 ARCHITECTURE M: Greg Ungerer L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -- cgit v1.2.3-59-g8ed1b From b182427efa7dbd9ed301dea7aae8a5c0fbbe5e53 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Sun, 28 Sep 2014 14:03:06 +0200 Subject: MAINTAINERS: change rt2x00 maintainer After short chat with Ivo, we decided that I'll take maintenance of rt2x00 driver. Thanks for Ivo's great work in the past! Signed-off-by: Stanislaw Gruszka Acked-by: Ivo Van Doorn Signed-off-by: John W. Linville --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 94ad1bab633a..482888d80431 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7474,13 +7474,12 @@ F: drivers/video/fbdev/aty/aty128fb.c RALINK RT2X00 WIRELESS LAN DRIVER P: rt2x00 project -M: Ivo van Doorn +M: Stanislaw Gruszka M: Helmut Schaa L: linux-wireless@vger.kernel.org L: users@rt2x00.serialmonkey.com (moderated for non-subscribers) W: http://rt2x00.serialmonkey.com/ S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ivd/rt2x00.git F: drivers/net/wireless/rt2x00/ RAMDISK RAM BLOCK DEVICE DRIVER -- cgit v1.2.3-59-g8ed1b From 5f6b6ccdbe1cdfa5aa4347ec5412509b8995db27 Mon Sep 17 00:00:00 2001 From: Tanmay Inamdar Date: Wed, 1 Oct 2014 13:01:35 -0600 Subject: PCI: xgene: Add APM X-Gene PCIe driver Add the AppliedMicro X-Gene SOC PCIe host controller driver. The X-Gene PCIe controller supports up to 8 lanes and GEN3 speed. The X-Gene SOC supports up to 5 PCIe ports. [bhelgaas: folded in MAINTAINERS and bindings updates] Tested-by: Ming Lei Tested-by: Dann Frazier Signed-off-by: Tanmay Inamdar Signed-off-by: Bjorn Helgaas Reviewed-by: Liviu Dudau (driver) --- .../devicetree/bindings/pci/xgene-pci.txt | 57 ++ MAINTAINERS | 8 + drivers/pci/host/Kconfig | 10 + drivers/pci/host/Makefile | 1 + drivers/pci/host/pci-xgene.c | 659 +++++++++++++++++++++ 5 files changed, 735 insertions(+) create mode 100644 Documentation/devicetree/bindings/pci/xgene-pci.txt create mode 100644 drivers/pci/host/pci-xgene.c (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/pci/xgene-pci.txt b/Documentation/devicetree/bindings/pci/xgene-pci.txt new file mode 100644 index 000000000000..1070b068c7c6 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/xgene-pci.txt @@ -0,0 +1,57 @@ +* AppliedMicro X-Gene PCIe interface + +Required properties: +- device_type: set to "pci" +- compatible: should contain "apm,xgene-pcie" to identify the core. +- reg: A list of physical base address and length for each set of controller + registers. Must contain an entry for each entry in the reg-names + property. +- reg-names: Must include the following entries: + "csr": controller configuration registers. + "cfg": pcie configuration space registers. 
+- #address-cells: set to <3> +- #size-cells: set to <2> +- ranges: ranges for the outbound memory, I/O regions. +- dma-ranges: ranges for the inbound memory regions. +- #interrupt-cells: set to <1> +- interrupt-map-mask and interrupt-map: standard PCI properties + to define the mapping of the PCIe interface to interrupt + numbers. +- clocks: from common clock binding: handle to pci clock. + +Optional properties: +- status: Either "ok" or "disabled". +- dma-coherent: Present if dma operations are coherent + +Example: + +SoC specific DT Entry: + + pcie0: pcie@1f2b0000 { + status = "disabled"; + device_type = "pci"; + compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */ + 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */ + reg-names = "csr", "cfg"; + ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */ + dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 + 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1 + 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1 + 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1 + 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>; + dma-coherent; + clocks = <&pcie0clk 0>; + }; + + +Board specific DT Entry: + &pcie0 { + status = "ok"; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 1ff06dee651d..076eb68874f4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6868,6 +6868,14 @@ F: include/linux/pci* F: arch/x86/pci/ F: arch/x86/kernel/quirks.c +PCI DRIVER FOR APPLIEDMICRO XGENE +M: Tanmay Inamdar +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org +S: Maintained +F: Documentation/devicetree/bindings/pci/xgene-pci.txt +F: drivers/pci/host/pci-xgene.c + PCI DRIVER FOR IMX6 M: Richard Zhu M: Shawn Guo diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 8922c376456a..d55de1b6ddde 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -63,4 +63,14 @@ config PCIE_SPEAR13XX help Say Y here if you want PCIe support on SPEAr13XX SoCs. +config PCI_XGENE + bool "X-Gene PCIe controller" + depends on ARCH_XGENE + depends on OF + select PCIEPORTBUS + help + Say Y here if you want internal PCI support on APM X-Gene SoC. + There are 5 internal PCIe ports available. Each port is GEN3 capable + and have varied lanes from x1 to x8. + endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index d0e88f114ff9..845611ff3de9 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o +obj-$(CONFIG_PCI_XGENE) += pci-xgene.o diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c new file mode 100644 index 000000000000..9ecabfa8c634 --- /dev/null +++ b/drivers/pci/host/pci-xgene.c @@ -0,0 +1,659 @@ +/** + * APM X-Gene PCIe Driver + * + * Copyright (c) 2014 Applied Micro Circuits Corporation. + * + * Author: Tanmay Inamdar . 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PCIECORE_CTLANDSTATUS 0x50 +#define PIM1_1L 0x80 +#define IBAR2 0x98 +#define IR2MSK 0x9c +#define PIM2_1L 0xa0 +#define IBAR3L 0xb4 +#define IR3MSKL 0xbc +#define PIM3_1L 0xc4 +#define OMR1BARL 0x100 +#define OMR2BARL 0x118 +#define OMR3BARL 0x130 +#define CFGBARL 0x154 +#define CFGBARH 0x158 +#define CFGCTL 0x15c +#define RTDID 0x160 +#define BRIDGE_CFG_0 0x2000 +#define BRIDGE_CFG_4 0x2010 +#define BRIDGE_STATUS_0 0x2600 + +#define LINK_UP_MASK 0x00000100 +#define AXI_EP_CFG_ACCESS 0x10000 +#define EN_COHERENCY 0xF0000000 +#define EN_REG 0x00000001 +#define OB_LO_IO 0x00000002 +#define XGENE_PCIE_VENDORID 0x10E8 +#define XGENE_PCIE_DEVICEID 0xE004 +#define SZ_1T (SZ_1G*1024ULL) +#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) + +struct xgene_pcie_port { + struct device_node *node; + struct device *dev; + struct clk *clk; + void __iomem *csr_base; + void __iomem *cfg_base; + unsigned long cfg_addr; + bool link_up; +}; + +static inline u32 pcie_bar_low_val(u32 addr, u32 flags) +{ + return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; +} + +/* PCIe Configuration Out/In */ +static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val) +{ + writel(val, addr + offset); +} + +static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val) +{ + u32 val32 = readl(addr + (offset & ~0x3)); + + switch (offset & 0x3) { + case 2: + val32 &= ~0xFFFF0000; + val32 |= (u32)val << 16; + break; + case 0: + default: + val32 &= ~0xFFFF; + val32 |= val; + break; + } + writel(val32, addr + (offset & ~0x3)); +} + +static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val) +{ + u32 val32 = readl(addr + (offset & ~0x3)); + + switch (offset & 0x3) { + case 0: + val32 &= ~0xFF; + val32 |= val; + break; + case 1: + val32 &= ~0xFF00; + val32 |= (u32)val << 8; + break; + case 2: + val32 &= ~0xFF0000; + val32 |= (u32)val << 16; + break; + case 3: + default: + val32 &= ~0xFF000000; + val32 |= (u32)val << 24; + break; + } + writel(val32, addr + (offset & ~0x3)); +} + +static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val) +{ + *val = readl(addr + offset); +} + +static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val) +{ + *val = readl(addr + (offset & ~0x3)); + + switch (offset & 0x3) { + case 2: + *val >>= 16; + break; + } + + *val &= 0xFFFF; +} + +static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val) +{ + *val = readl(addr + (offset & ~0x3)); + + switch (offset & 0x3) { + case 3: + *val = *val >> 24; + break; + case 2: + *val = *val >> 16; + break; + case 1: + *val = *val >> 8; + break; + } + *val &= 0xFF; +} + +/* + * When the address bit [17:16] is 2'b01, the Configuration access will be + * treated as Type 1 and it will be forwarded to external PCIe device. 
+ */ +static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) +{ + struct xgene_pcie_port *port = bus->sysdata; + + if (bus->number >= (bus->primary + 1)) + return port->cfg_base + AXI_EP_CFG_ACCESS; + + return port->cfg_base; +} + +/* + * For Configuration request, RTDID register is used as Bus Number, + * Device Number and Function number of the header fields. + */ +static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) +{ + struct xgene_pcie_port *port = bus->sysdata; + unsigned int b, d, f; + u32 rtdid_val = 0; + + b = bus->number; + d = PCI_SLOT(devfn); + f = PCI_FUNC(devfn); + + if (!pci_is_root_bus(bus)) + rtdid_val = (b << 8) | (d << 3) | f; + + writel(rtdid_val, port->csr_base + RTDID); + /* read the register back to ensure flush */ + readl(port->csr_base + RTDID); +} + +/* + * X-Gene PCIe port uses BAR0-BAR1 of RC's configuration space as + * the translation from PCI bus to native BUS. Entire DDR region + * is mapped into PCIe space using these registers, so it can be + * reached by DMA from EP devices. The BAR0/1 of bridge should be + * hidden during enumeration to avoid the sizing and resource allocation + * by PCIe core. + */ +static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) +{ + if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) || + (offset == PCI_BASE_ADDRESS_1))) + return true; + + return false; +} + +static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 *val) +{ + struct xgene_pcie_port *port = bus->sysdata; + void __iomem *addr; + + if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (xgene_pcie_hide_rc_bars(bus, offset)) { + *val = 0; + return PCIBIOS_SUCCESSFUL; + } + + xgene_pcie_set_rtdid_reg(bus, devfn); + addr = xgene_pcie_get_cfg_base(bus); + switch (len) { + case 1: + xgene_pcie_cfg_in8(addr, offset, val); + break; + case 2: + xgene_pcie_cfg_in16(addr, offset, val); + break; + default: + xgene_pcie_cfg_in32(addr, offset, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 val) +{ + struct xgene_pcie_port *port = bus->sysdata; + void __iomem *addr; + + if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (xgene_pcie_hide_rc_bars(bus, offset)) + return PCIBIOS_SUCCESSFUL; + + xgene_pcie_set_rtdid_reg(bus, devfn); + addr = xgene_pcie_get_cfg_base(bus); + switch (len) { + case 1: + xgene_pcie_cfg_out8(addr, offset, (u8)val); + break; + case 2: + xgene_pcie_cfg_out16(addr, offset, (u16)val); + break; + default: + xgene_pcie_cfg_out32(addr, offset, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops xgene_pcie_ops = { + .read = xgene_pcie_read_config, + .write = xgene_pcie_write_config +}; + +static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr, + u32 flags, u64 size) +{ + u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags; + u32 val32 = 0; + u32 val; + + val32 = readl(csr_base + addr); + val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16); + writel(val, csr_base + addr); + + val32 = readl(csr_base + addr + 0x04); + val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16); + writel(val, csr_base + addr + 0x04); + + val32 = readl(csr_base + addr + 0x04); + val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16); + writel(val, csr_base + addr + 0x04); + + val32 = readl(csr_base + addr + 0x08); + val = (val32 & 
0xffff0000) | (upper_32_bits(mask) >> 16); + writel(val, csr_base + addr + 0x08); + + return mask; +} + +static void xgene_pcie_linkup(struct xgene_pcie_port *port, + u32 *lanes, u32 *speed) +{ + void __iomem *csr_base = port->csr_base; + u32 val32; + + port->link_up = false; + val32 = readl(csr_base + PCIECORE_CTLANDSTATUS); + if (val32 & LINK_UP_MASK) { + port->link_up = true; + *speed = PIPE_PHY_RATE_RD(val32); + val32 = readl(csr_base + BRIDGE_STATUS_0); + *lanes = val32 >> 26; + } +} + +static int xgene_pcie_init_port(struct xgene_pcie_port *port) +{ + int rc; + + port->clk = clk_get(port->dev, NULL); + if (IS_ERR(port->clk)) { + dev_err(port->dev, "clock not available\n"); + return -ENODEV; + } + + rc = clk_prepare_enable(port->clk); + if (rc) { + dev_err(port->dev, "clock enable failed\n"); + return rc; + } + + return 0; +} + +static int xgene_pcie_map_reg(struct xgene_pcie_port *port, + struct platform_device *pdev) +{ + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); + port->csr_base = devm_ioremap_resource(port->dev, res); + if (IS_ERR(port->csr_base)) + return PTR_ERR(port->csr_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + port->cfg_base = devm_ioremap_resource(port->dev, res); + if (IS_ERR(port->cfg_base)) + return PTR_ERR(port->cfg_base); + port->cfg_addr = res->start; + + return 0; +} + +static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, + struct resource *res, u32 offset, + u64 cpu_addr, u64 pci_addr) +{ + void __iomem *base = port->csr_base + offset; + resource_size_t size = resource_size(res); + u64 restype = resource_type(res); + u64 mask = 0; + u32 min_size; + u32 flag = EN_REG; + + if (restype == IORESOURCE_MEM) { + min_size = SZ_128M; + } else { + min_size = 128; + flag |= OB_LO_IO; + } + + if (size >= min_size) + mask = ~(size - 1) | flag; + else + dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n", + (u64)size, min_size); + + writel(lower_32_bits(cpu_addr), base); + writel(upper_32_bits(cpu_addr), base + 0x04); + writel(lower_32_bits(mask), base + 0x08); + writel(upper_32_bits(mask), base + 0x0c); + writel(lower_32_bits(pci_addr), base + 0x10); + writel(upper_32_bits(pci_addr), base + 0x14); +} + +static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr) +{ + writel(lower_32_bits(addr), csr_base + CFGBARL); + writel(upper_32_bits(addr), csr_base + CFGBARH); + writel(EN_REG, csr_base + CFGCTL); +} + +static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, + struct list_head *res, + resource_size_t io_base) +{ + struct pci_host_bridge_window *window; + struct device *dev = port->dev; + int ret; + + list_for_each_entry(window, res, list) { + struct resource *res = window->res; + u64 restype = resource_type(res); + + dev_dbg(port->dev, "%pR\n", res); + + switch (restype) { + case IORESOURCE_IO: + xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base, + res->start - window->offset); + ret = pci_remap_iospace(res, io_base); + if (ret < 0) + return ret; + break; + case IORESOURCE_MEM: + xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start, + res->start - window->offset); + break; + case IORESOURCE_BUS: + break; + default: + dev_err(dev, "invalid resource %pR\n", res); + return -EINVAL; + } + } + xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr); + + return 0; +} + +static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size) +{ + writel(lower_32_bits(pim), addr); + writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04); + 
writel(lower_32_bits(size), addr + 0x10); + writel(upper_32_bits(size), addr + 0x14); +} + +/* + * X-Gene PCIe support maximum 3 inbound memory regions + * This function helps to select a region based on size of region + */ +static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) +{ + if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) { + *ib_reg_mask |= (1 << 1); + return 1; + } + + if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { + *ib_reg_mask |= (1 << 0); + return 0; + } + + if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) { + *ib_reg_mask |= (1 << 2); + return 2; + } + + return -EINVAL; +} + +static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, + struct of_pci_range *range, u8 *ib_reg_mask) +{ + void __iomem *csr_base = port->csr_base; + void __iomem *cfg_base = port->cfg_base; + void *bar_addr; + void *pim_addr; + u64 cpu_addr = range->cpu_addr; + u64 pci_addr = range->pci_addr; + u64 size = range->size; + u64 mask = ~(size - 1) | EN_REG; + u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; + u32 bar_low; + int region; + + region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); + if (region < 0) { + dev_warn(port->dev, "invalid pcie dma-range config\n"); + return; + } + + if (range->flags & IORESOURCE_PREFETCH) + flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; + + bar_low = pcie_bar_low_val((u32)cpu_addr, flags); + switch (region) { + case 0: + xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size); + bar_addr = cfg_base + PCI_BASE_ADDRESS_0; + writel(bar_low, bar_addr); + writel(upper_32_bits(cpu_addr), bar_addr + 0x4); + pim_addr = csr_base + PIM1_1L; + break; + case 1: + bar_addr = csr_base + IBAR2; + writel(bar_low, bar_addr); + writel(lower_32_bits(mask), csr_base + IR2MSK); + pim_addr = csr_base + PIM2_1L; + break; + case 2: + bar_addr = csr_base + IBAR3L; + writel(bar_low, bar_addr); + writel(upper_32_bits(cpu_addr), bar_addr + 0x4); + writel(lower_32_bits(mask), csr_base + IR3MSKL); + writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4); + pim_addr = csr_base + PIM3_1L; + break; + } + + xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1)); +} + +static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + const int na = 3, ns = 2; + int rlen; + + parser->node = node; + parser->pna = of_n_addr_cells(node); + parser->np = parser->pna + na + ns; + + parser->range = of_get_property(node, "dma-ranges", &rlen); + if (!parser->range) + return -ENOENT; + parser->end = parser->range + rlen / sizeof(__be32); + + return 0; +} + +static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) +{ + struct device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; + u8 ib_reg_mask = 0; + + if (pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + + dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); + } + return 0; +} + +/* clear BAR configuration which was done by firmware */ +static void xgene_pcie_clear_config(struct xgene_pcie_port *port) +{ + int i; + + for (i = PIM1_1L; i <= CFGCTL; i += 4) + writel(0x0, port->csr_base + i); +} + +static int xgene_pcie_setup(struct xgene_pcie_port *port, + 
struct list_head *res, + resource_size_t io_base) +{ + u32 val, lanes = 0, speed = 0; + int ret; + + xgene_pcie_clear_config(port); + + /* setup the vendor and device IDs correctly */ + val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID; + writel(val, port->csr_base + BRIDGE_CFG_0); + + ret = xgene_pcie_map_ranges(port, res, io_base); + if (ret) + return ret; + + ret = xgene_pcie_parse_map_dma_ranges(port); + if (ret) + return ret; + + xgene_pcie_linkup(port, &lanes, &speed); + if (!port->link_up) + dev_info(port->dev, "(rc) link down\n"); + else + dev_info(port->dev, "(rc) x%d gen-%d link up\n", + lanes, speed + 1); + return 0; +} + +static int xgene_pcie_probe_bridge(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + struct xgene_pcie_port *port; + resource_size_t iobase = 0; + struct pci_bus *bus; + int ret; + LIST_HEAD(res); + + port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + port->node = of_node_get(pdev->dev.of_node); + port->dev = &pdev->dev; + + ret = xgene_pcie_map_reg(port, pdev); + if (ret) + return ret; + + ret = xgene_pcie_init_port(port); + if (ret) + return ret; + + ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase); + if (ret) + return ret; + + ret = xgene_pcie_setup(port, &res, iobase); + if (ret) + return ret; + + bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res); + if (!bus) + return -ENOMEM; + + platform_set_drvdata(pdev, port); + return 0; +} + +static const struct of_device_id xgene_pcie_match_table[] = { + {.compatible = "apm,xgene-pcie",}, + {}, +}; + +static struct platform_driver xgene_pcie_driver = { + .driver = { + .name = "xgene-pcie", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(xgene_pcie_match_table), + }, + .probe = xgene_pcie_probe_bridge, +}; +module_platform_driver(xgene_pcie_driver); + +MODULE_AUTHOR("Tanmay Inamdar "); +MODULE_DESCRIPTION("APM X-Gene PCIe driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-59-g8ed1b From 5529c2cdfd87e64c5801bfb9788a89d78de03414 Mon Sep 17 00:00:00 2001 From: Krzysztof Hałasa Date: Tue, 30 Sep 2014 10:32:58 +0200 Subject: MAINTAINERS: CNS3xxx and IXP4xx update. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I'm told Anton Vorontsov can't maintain Cavium Econa CNS3xxx support anymore. Perhaps I can. Also changing my email contact address for IXP4xx. 
Signed-off-by: Krzysztof Hałasa Signed-off-by: Arnd Bergmann --- MAINTAINERS | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 4fe9320388e7..f04c8b672622 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -867,10 +867,9 @@ S: Maintained F: arch/arm/mach-highbank/ ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT -M: Anton Vorontsov +M: Krzysztof Halasa S: Maintained F: arch/arm/mach-cns3xxx/ -T: git git://git.infradead.org/users/cbou/linux-cns3xxx.git ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE M: Alexander Shiyan @@ -1059,7 +1058,7 @@ S: Maintained ARM/INTEL IXP4XX ARM ARCHITECTURE M: Imre Kaloz -M: Krzysztof Halasa +M: Krzysztof Halasa L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-ixp4xx/ @@ -4787,7 +4786,7 @@ S: Odd fixes F: drivers/dma/iop-adma.c INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT -M: Krzysztof Halasa +M: Krzysztof Halasa S: Maintained F: arch/arm/mach-ixp4xx/include/mach/qmgr.h F: arch/arm/mach-ixp4xx/include/mach/npe.h -- cgit v1.2.3-59-g8ed1b From dba4b74d2da8798626e2b702ad3f452671e335f7 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Wed, 1 Oct 2014 15:05:25 +0300 Subject: wil6210: atomic I/O for the card memory Introduce netdev IOCTLs, to be used by the debug tools. Allows to read/write single dword value or memory block, aligned to dword Different address modes supported: - BAR offset - Firmware "linker" address - target's AHB bus Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville --- MAINTAINERS | 1 + drivers/net/wireless/ath/wil6210/Makefile | 1 + drivers/net/wireless/ath/wil6210/ioctl.c | 173 +++++++++++++++++++++++++++++ drivers/net/wireless/ath/wil6210/netdev.c | 12 ++ drivers/net/wireless/ath/wil6210/wil6210.h | 2 + include/uapi/linux/wil6210_uapi.h | 87 +++++++++++++++ 6 files changed, 276 insertions(+) create mode 100644 drivers/net/wireless/ath/wil6210/ioctl.c create mode 100644 include/uapi/linux/wil6210_uapi.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 482888d80431..66de6b2923d4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1611,6 +1611,7 @@ L: wil6210@qca.qualcomm.com S: Supported W: http://wireless.kernel.org/en/users/Drivers/wil6210 F: drivers/net/wireless/ath/wil6210/ +F: include/uapi/linux/wil6210_uapi.h CARL9170 LINUX COMMUNITY WIRELESS DRIVER M: Christian Lamparter diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index 05f29a6b1ba8..8ad4b5f97e04 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -10,6 +10,7 @@ wil6210-y += interrupt.o wil6210-y += txrx.o wil6210-y += debug.o wil6210-y += rx_reorder.o +wil6210-y += ioctl.o wil6210-y += fw.o wil6210-$(CONFIG_WIL6210_TRACING) += trace.o wil6210-y += wil_platform.o diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c new file mode 100644 index 000000000000..e9c0673819c6 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/ioctl.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "wil6210.h" +#include + +#define wil_hex_dump_ioctl(prefix_str, buf, len) \ + print_hex_dump_debug("DBG[IOC ]" prefix_str, \ + DUMP_PREFIX_OFFSET, 16, 1, buf, len, true) +#define wil_dbg_ioctl(wil, fmt, arg...) wil_dbg(wil, "DBG[IOC ]" fmt, ##arg) + +static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr, + uint32_t size, enum wil_memio_op op) +{ + void __iomem *a; + u32 off; + + switch (op & wil_mmio_addr_mask) { + case wil_mmio_addr_linker: + a = wmi_buffer(wil, cpu_to_le32(addr)); + break; + case wil_mmio_addr_ahb: + a = wmi_addr(wil, addr); + break; + case wil_mmio_addr_bar: + a = wmi_addr(wil, addr + WIL6210_FW_HOST_OFF); + break; + default: + wil_err(wil, "Unsupported address mode, op = 0x%08x\n", op); + return NULL; + } + + off = a - wil->csr; + if (size >= WIL6210_MEM_SIZE - off) { + wil_err(wil, "Requested block does not fit into memory: " + "off = 0x%08x size = 0x%08x\n", off, size); + return NULL; + } + + return a; +} + +static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data) +{ + struct wil_memio io; + void __iomem *a; + bool need_copy = false; + + if (copy_from_user(&io, data, sizeof(io))) + return -EFAULT; + + wil_dbg_ioctl(wil, "IO: addr = 0x%08x val = 0x%08x op = 0x%08x\n", + io.addr, io.val, io.op); + + a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op); + if (!a) { + wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr, + io.op); + return -EINVAL; + } + /* operation */ + switch (io.op & wil_mmio_op_mask) { + case wil_mmio_read: + io.val = ioread32(a); + need_copy = true; + break; + case wil_mmio_write: + iowrite32(io.val, a); + wmb(); /* make sure write propagated to HW */ + break; + default: + wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op); + return -EINVAL; + } + + if (need_copy) { + wil_dbg_ioctl(wil, "IO done: addr = 0x%08x" + " val = 0x%08x op = 0x%08x\n", + io.addr, io.val, io.op); + if (copy_to_user(data, &io, sizeof(io))) + return -EFAULT; + } + + return 0; +} + +static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data) +{ + struct wil_memio_block io; + void *block; + void __iomem *a; + int rc = 0; + + if (copy_from_user(&io, data, sizeof(io))) + return -EFAULT; + + wil_dbg_ioctl(wil, "IO: addr = 0x%08x size = 0x%08x op = 0x%08x\n", + io.addr, io.size, io.op); + + /* size */ + if (io.size % 4) { + wil_err(wil, "size is not multiple of 4: 0x%08x\n", io.size); + return -EINVAL; + } + + a = wil_ioc_addr(wil, io.addr, io.size, io.op); + if (!a) { + wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr, + io.op); + return -EINVAL; + } + + block = kmalloc(io.size, GFP_USER); + if (!block) + return -ENOMEM; + + /* operation */ + switch (io.op & wil_mmio_op_mask) { + case wil_mmio_read: + wil_memcpy_fromio_32(block, a, io.size); + wil_hex_dump_ioctl("Read ", block, io.size); + if (copy_to_user(io.block, block, io.size)) { + rc = -EFAULT; + goto out_free; + } + break; + case wil_mmio_write: + if (copy_from_user(block, io.block, io.size)) { + rc = 
-EFAULT; + goto out_free; + } + wil_memcpy_toio_32(a, block, io.size); + wmb(); /* make sure write propagated to HW */ + wil_hex_dump_ioctl("Write ", block, io.size); + break; + default: + wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op); + rc = -EINVAL; + break; + } + +out_free: + kfree(block); + return rc; +} + +int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd) +{ + switch (cmd) { + case WIL_IOCTL_MEMIO: + return wil_ioc_memio_dword(wil, data); + case WIL_IOCTL_MEMIO_BLOCK: + return wil_ioc_memio_block(wil, data); + default: + wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd); + return -ENOIOCTLCMD; + } +} diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index c3f0ddfaa5da..239965106c05 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -52,6 +52,17 @@ static int wil_change_mtu(struct net_device *ndev, int new_mtu) return 0; } +static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + struct wil6210_priv *wil = ndev_to_wil(ndev); + + int ret = wil_ioctl(wil, ifr->ifr_data, cmd); + + wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret); + + return ret; +} + static const struct net_device_ops wil_netdev_ops = { .ndo_open = wil_open, .ndo_stop = wil_stop, @@ -59,6 +70,7 @@ static const struct net_device_ops wil_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = wil_change_mtu, + .ndo_do_ioctl = wil_do_ioctl, }; static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 2991609885f7..61cceca155a2 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -595,5 +595,7 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil); int wil_iftype_nl2wmi(enum nl80211_iftype type); +int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd); int wil_request_firmware(struct wil6210_priv *wil, const char *name); + #endif /* __WIL6210_H__ */ diff --git a/include/uapi/linux/wil6210_uapi.h b/include/uapi/linux/wil6210_uapi.h new file mode 100644 index 000000000000..6a3cddd156c4 --- /dev/null +++ b/include/uapi/linux/wil6210_uapi.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WIL6210_UAPI_H__ +#define __WIL6210_UAPI_H__ + +#if !defined(__KERNEL__) +#define __user +#endif + +#include + +/* Numbers SIOCDEVPRIVATE and SIOCDEVPRIVATE + 1 + * are used by Android devices to implement PNO (preferred network offload). 
+ * Albeit it is temporary solution, use different numbers to avoid conflicts + */ + +/** + * Perform 32-bit I/O operation to the card memory + * + * User code should arrange data in memory like this: + * + * struct wil_memio io; + * struct ifreq ifr = { + * .ifr_data = &io, + * }; + */ +#define WIL_IOCTL_MEMIO (SIOCDEVPRIVATE + 2) + +/** + * Perform block I/O operation to the card memory + * + * User code should arrange data in memory like this: + * + * void *buf; + * struct wil_memio_block io = { + * .block = buf, + * }; + * struct ifreq ifr = { + * .ifr_data = &io, + * }; + */ +#define WIL_IOCTL_MEMIO_BLOCK (SIOCDEVPRIVATE + 3) + +/** + * operation to perform + * + * @wil_mmio_op_mask - bits defining operation, + * @wil_mmio_addr_mask - bits defining addressing mode + */ +enum wil_memio_op { + wil_mmio_read = 0, + wil_mmio_write = 1, + wil_mmio_op_mask = 0xff, + wil_mmio_addr_linker = 0 << 8, + wil_mmio_addr_ahb = 1 << 8, + wil_mmio_addr_bar = 2 << 8, + wil_mmio_addr_mask = 0xff00, +}; + +struct wil_memio { + uint32_t op; /* enum wil_memio_op */ + uint32_t addr; /* should be 32-bit aligned */ + uint32_t val; +}; + +struct wil_memio_block { + uint32_t op; /* enum wil_memio_op */ + uint32_t addr; /* should be 32-bit aligned */ + uint32_t size; /* should be multiple of 4 */ + void __user *block; /* block address */ +}; + +#endif /* __WIL6210_UAPI_H__ */ -- cgit v1.2.3-59-g8ed1b From 38df6492eb511d2a6823303cb1a194c4fe423154 Mon Sep 17 00:00:00 2001 From: Mark Einon Date: Tue, 30 Sep 2014 22:29:46 +0100 Subject: et131x: Add PCIe gigabit ethernet driver et131x to drivers/net This adds the ethernet driver for Agere et131x devices to drivers/net/ethernet. The driver being added has been in the staging tree for some time, and will be removed from there in a seperate patch. This one merely disables the staging version to prevent two instances being built. Signed-off-by: Mark Einon Signed-off-by: David S. 
Miller --- MAINTAINERS | 10 +- drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/agere/Kconfig | 31 + drivers/net/ethernet/agere/Makefile | 5 + drivers/net/ethernet/agere/et131x.c | 4121 +++++++++++++++++++++++++++++++++++ drivers/net/ethernet/agere/et131x.h | 1433 ++++++++++++ drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - 9 files changed, 5597 insertions(+), 8 deletions(-) create mode 100644 drivers/net/ethernet/agere/Kconfig create mode 100644 drivers/net/ethernet/agere/Makefile create mode 100644 drivers/net/ethernet/agere/et131x.c create mode 100644 drivers/net/ethernet/agere/et131x.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c2e7a06c585b..78b9f3b77a87 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3550,6 +3550,11 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git F: drivers/video/fbdev/s1d13xxxfb.c F: include/video/s1d13xxxfb.h +ET131X NETWORK DRIVER +M: Mark Einon +S: Odd Fixes +F: drivers/net/ethernet/agere/ + ETHERNET BRIDGE M: Stephen Hemminger L: bridge@lists.linux-foundation.org @@ -8694,11 +8699,6 @@ M: H Hartley Sweeten S: Odd Fixes F: drivers/staging/comedi/ -STAGING - ET131X NETWORK DRIVER -M: Mark Einon -S: Odd Fixes -F: drivers/staging/et131x/ - STAGING - FLARION FT1000 DRIVERS M: Marek Belisko S: Odd Fixes diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 0005e3792e6d..1ed1fbba5d58 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -20,6 +20,7 @@ config SUNGEM_PHY source "drivers/net/ethernet/3com/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" +source "drivers/net/ethernet/agere/Kconfig" source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/altera/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 153bf2dd9fad..6e0b629e9859 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ +obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_ALTERA_TSE) += altera/ diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig new file mode 100644 index 000000000000..63e805de619e --- /dev/null +++ b/drivers/net/ethernet/agere/Kconfig @@ -0,0 +1,31 @@ +# +# Agere device configuration +# + +config NET_VENDOR_AGERE + bool "Agere devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Agere devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_AGERE + +config ET131X + tristate "Agere ET-1310 Gigabit Ethernet support" + depends on PCI + select PHYLIB + ---help--- + This driver supports Agere ET-1310 ethernet adapters. + + To compile this driver as a module, choose M here. The module + will be called et131x. 
+ +endif # NET_VENDOR_AGERE diff --git a/drivers/net/ethernet/agere/Makefile b/drivers/net/ethernet/agere/Makefile new file mode 100644 index 000000000000..027ff9453fe1 --- /dev/null +++ b/drivers/net/ethernet/agere/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Agere ET-131x ethernet driver +# + +obj-$(CONFIG_ET131X) += et131x.o diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c new file mode 100644 index 000000000000..384dc163851b --- /dev/null +++ b/drivers/net/ethernet/agere/et131x.c @@ -0,0 +1,4121 @@ +/* Agere Systems Inc. + * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * http://www.agere.com + * + * Copyright (c) 2011 Mark Einon + * + *------------------------------------------------------------------------------ + * + * SOFTWARE LICENSE + * + * This software is provided subject to the following terms and conditions, + * which you should read carefully before using the software. Using this + * software indicates your acceptance of these terms and conditions. If you do + * not agree with these terms and conditions, do not use the software. + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * + * Redistribution and use in source or binary forms, with or without + * modifications, are permitted provided that the following conditions are met: + * + * . Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following Disclaimer as comments in the code as + * well as in the documentation and/or other materials provided with the + * distribution. + * + * . Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following Disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * . Neither the name of Agere Systems Inc. nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * Disclaimer + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "et131x.h" + +MODULE_AUTHOR("Victor Soriano "); +MODULE_AUTHOR("Mark Einon "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems"); + +/* EEPROM defines */ +#define MAX_NUM_REGISTER_POLLS 1000 +#define MAX_NUM_WRITE_RETRIES 2 + +/* MAC defines */ +#define COUNTER_WRAP_16_BIT 0x10000 +#define COUNTER_WRAP_12_BIT 0x1000 + +/* PCI defines */ +#define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ +#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ + +/* ISR defines */ +/* For interrupts, normal running is: + * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, + * watchdog_interrupt & txdma_xfer_done + * + * In both cases, when flow control is enabled for either Tx or bi-direction, + * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the + * buffer rings are running low. + */ +#define INT_MASK_DISABLE 0xffffffff + +/* NOTE: Masking out MAC_STAT Interrupt for now... + * #define INT_MASK_ENABLE 0xfff6bf17 + * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 + */ +#define INT_MASK_ENABLE 0xfffebf17 +#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 + +/* General defines */ +/* Packet and header sizes */ +#define NIC_MIN_PACKET_SIZE 60 + +/* Multicast list size */ +#define NIC_MAX_MCAST_LIST 128 + +/* Supported Filters */ +#define ET131X_PACKET_TYPE_DIRECTED 0x0001 +#define ET131X_PACKET_TYPE_MULTICAST 0x0002 +#define ET131X_PACKET_TYPE_BROADCAST 0x0004 +#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008 +#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010 + +/* Tx Timeout */ +#define ET131X_TX_TIMEOUT (1 * HZ) +#define NIC_SEND_HANG_THRESHOLD 0 + +/* MP_ADAPTER flags */ +#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008 + +/* MP_SHARED flags */ +#define FMP_ADAPTER_LOWER_POWER 0x00200000 + +#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000 +#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000 + +#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000 + +/* Some offsets in PCI config space that are actually used. 
*/ +#define ET1310_PCI_MAC_ADDRESS 0xA4 +#define ET1310_PCI_EEPROM_STATUS 0xB2 +#define ET1310_PCI_ACK_NACK 0xC0 +#define ET1310_PCI_REPLAY 0xC2 +#define ET1310_PCI_L0L1LATENCY 0xCF + +/* PCI Product IDs */ +#define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */ +#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */ + +/* Define order of magnitude converter */ +#define NANO_IN_A_MICRO 1000 + +#define PARM_RX_NUM_BUFS_DEF 4 +#define PARM_RX_TIME_INT_DEF 10 +#define PARM_RX_MEM_END_DEF 0x2bc +#define PARM_TX_TIME_INT_DEF 40 +#define PARM_TX_NUM_BUFS_DEF 4 +#define PARM_DMA_CACHE_DEF 0 + +/* RX defines */ +#define FBR_CHUNKS 32 +#define MAX_DESC_PER_RING_RX 1024 + +/* number of RFDs - default and min */ +#define RFD_LOW_WATER_MARK 40 +#define NIC_DEFAULT_NUM_RFD 1024 +#define NUM_FBRS 2 + +#define MAX_PACKETS_HANDLED 256 + +#define ALCATEL_MULTICAST_PKT 0x01000000 +#define ALCATEL_BROADCAST_PKT 0x02000000 + +/* typedefs for Free Buffer Descriptors */ +struct fbr_desc { + u32 addr_lo; + u32 addr_hi; + u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */ +}; + +/* Packet Status Ring Descriptors + * + * Word 0: + * + * top 16 bits are from the Alcatel Status Word as enumerated in + * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2) + * + * 0: hp hash pass + * 1: ipa IP checksum assist + * 2: ipp IP checksum pass + * 3: tcpa TCP checksum assist + * 4: tcpp TCP checksum pass + * 5: wol WOL Event + * 6: rxmac_error RXMAC Error Indicator + * 7: drop Drop packet + * 8: ft Frame Truncated + * 9: jp Jumbo Packet + * 10: vp VLAN Packet + * 11-15: unused + * 16: asw_prev_pkt_dropped e.g. IFG too small on previous + * 17: asw_RX_DV_event short receive event detected + * 18: asw_false_carrier_event bad carrier since last good packet + * 19: asw_code_err one or more nibbles signalled as errors + * 20: asw_CRC_err CRC error + * 21: asw_len_chk_err frame length field incorrect + * 22: asw_too_long frame length > 1518 bytes + * 23: asw_OK valid CRC + no code error + * 24: asw_multicast has a multicast address + * 25: asw_broadcast has a broadcast address + * 26: asw_dribble_nibble spurious bits after EOP + * 27: asw_control_frame is a control frame + * 28: asw_pause_frame is a pause frame + * 29: asw_unsupported_op unsupported OP code + * 30: asw_VLAN_tag VLAN tag detected + * 31: asw_long_evt Rx long event + * + * Word 1: + * 0-15: length length in bytes + * 16-25: bi Buffer Index + * 26-27: ri Ring Index + * 28-31: reserved + */ +struct pkt_stat_desc { + u32 word0; + u32 word1; +}; + +/* Typedefs for the RX DMA status word */ + +/* rx status word 0 holds part of the status bits of the Rx DMA engine + * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word + * which contains the Free Buffer ring 0 and 1 available offset. + * + * bit 0-9 FBR1 offset + * bit 10 Wrap flag for FBR1 + * bit 16-25 FBR0 offset + * bit 26 Wrap flag for FBR0 + */ + +/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine + * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word + * which contains the Packet Status Ring available offset. + * + * bit 0-15 reserved + * bit 16-27 PSRoffset + * bit 28 PSRwrap + * bit 29-31 unused + */ + +/* struct rx_status_block is a structure representing the status of the Rx + * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020 + */ +struct rx_status_block { + u32 word0; + u32 word1; +}; + +/* Structure for look-up table holding free buffer ring pointers, addresses + * and state. 
+ */ +struct fbr_lookup { + void *virt[MAX_DESC_PER_RING_RX]; + u32 bus_high[MAX_DESC_PER_RING_RX]; + u32 bus_low[MAX_DESC_PER_RING_RX]; + void *ring_virtaddr; + dma_addr_t ring_physaddr; + void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; + dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; + u32 local_full; + u32 num_entries; + dma_addr_t buffsize; +}; + +/* struct rx_ring is the structure representing the adaptor's local + * reference(s) to the rings + */ +struct rx_ring { + struct fbr_lookup *fbr[NUM_FBRS]; + void *ps_ring_virtaddr; + dma_addr_t ps_ring_physaddr; + u32 local_psr_full; + u32 psr_entries; + + struct rx_status_block *rx_status_block; + dma_addr_t rx_status_bus; + + struct list_head recv_list; + u32 num_ready_recv; + + u32 num_rfd; + + bool unfinished_receives; +}; + +/* TX defines */ +/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310 + * + * 0-15: length of packet + * 16-27: VLAN tag + * 28: VLAN CFI + * 29-31: VLAN priority + * + * word 3 of the control bits in the Tx Descriptor ring for the ET-1310 + * + * 0: last packet in the sequence + * 1: first packet in the sequence + * 2: interrupt the processor when this pkt sent + * 3: Control word - no packet data + * 4: Issue half-duplex backpressure : XON/XOFF + * 5: send pause frame + * 6: Tx frame has error + * 7: append CRC + * 8: MAC override + * 9: pad packet + * 10: Packet is a Huge packet + * 11: append VLAN tag + * 12: IP checksum assist + * 13: TCP checksum assist + * 14: UDP checksum assist + */ +#define TXDESC_FLAG_LASTPKT 0x0001 +#define TXDESC_FLAG_FIRSTPKT 0x0002 +#define TXDESC_FLAG_INTPROC 0x0004 + +/* struct tx_desc represents each descriptor on the ring */ +struct tx_desc { + u32 addr_hi; + u32 addr_lo; + u32 len_vlan; /* control words how to xmit the */ + u32 flags; /* data (detailed above) */ +}; + +/* The status of the Tx DMA engine it sits in free memory, and is pointed to + * by 0x101c / 0x1020. This is a DMA10 type + */ + +/* TCB (Transmit Control Block: Host Side) */ +struct tcb { + struct tcb *next; /* Next entry in ring */ + u32 count; /* Used to spot stuck/lost packets */ + u32 stale; /* Used to spot stuck/lost packets */ + struct sk_buff *skb; /* Network skb we are tied to */ + u32 index; /* Ring indexes */ + u32 index_start; +}; + +/* Structure representing our local reference(s) to the ring */ +struct tx_ring { + /* TCB (Transmit Control Block) memory and lists */ + struct tcb *tcb_ring; + + /* List of TCBs that are ready to be used */ + struct tcb *tcb_qhead; + struct tcb *tcb_qtail; + + /* list of TCBs that are currently being sent. */ + struct tcb *send_head; + struct tcb *send_tail; + int used; + + /* The actual descriptor ring */ + struct tx_desc *tx_desc_ring; + dma_addr_t tx_desc_ring_pa; + + /* send_idx indicates where we last wrote to in the descriptor ring. */ + u32 send_idx; + + /* The location of the write-back status block */ + u32 *tx_status; + dma_addr_t tx_status_pa; + + /* Packets since the last IRQ: used for interrupt coalescing */ + int since_irq; +}; + +/* Do not change these values: if changed, then change also in respective + * TXdma and Rxdma engines + */ +#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */ +#define NUM_TCB 64 + +/* These values are all superseded by registry entries to facilitate tuning. 
+ * Once the desired performance has been achieved, the optimal registry values + * should be re-populated to these #defines: + */ +#define TX_ERROR_PERIOD 1000 + +#define LO_MARK_PERCENT_FOR_PSR 15 +#define LO_MARK_PERCENT_FOR_RX 15 + +/* RFD (Receive Frame Descriptor) */ +struct rfd { + struct list_head list_node; + struct sk_buff *skb; + u32 len; /* total size of receive frame */ + u16 bufferindex; + u8 ringindex; +}; + +/* Flow Control */ +#define FLOW_BOTH 0 +#define FLOW_TXONLY 1 +#define FLOW_RXONLY 2 +#define FLOW_NONE 3 + +/* Struct to define some device statistics */ +struct ce_stats { + u32 multicast_pkts_rcvd; + u32 rcvd_pkts_dropped; + + u32 tx_underflows; + u32 tx_collisions; + u32 tx_excessive_collisions; + u32 tx_first_collisions; + u32 tx_late_collisions; + u32 tx_max_pkt_errs; + u32 tx_deferred; + + u32 rx_overflows; + u32 rx_length_errs; + u32 rx_align_errs; + u32 rx_crc_errs; + u32 rx_code_violations; + u32 rx_other_errs; + + u32 interrupt_status; +}; + +/* The private adapter structure */ +struct et131x_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct mii_bus *mii_bus; + struct phy_device *phydev; + struct napi_struct napi; + + /* Flags that indicate current state of the adapter */ + u32 flags; + + /* local link state, to determine if a state change has occurred */ + int link; + + /* Configuration */ + u8 rom_addr[ETH_ALEN]; + u8 addr[ETH_ALEN]; + bool has_eeprom; + u8 eeprom_data[2]; + + spinlock_t tcb_send_qlock; /* protects the tx_ring send tcb list */ + spinlock_t tcb_ready_qlock; /* protects the tx_ring ready tcb list */ + spinlock_t rcv_lock; /* protects the rx_ring receive list */ + + /* Packet Filter and look ahead size */ + u32 packet_filter; + + /* multicast list */ + u32 multicast_addr_count; + u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN]; + + /* Pointer to the device's PCI register space */ + struct address_map __iomem *regs; + + /* Registry parameters */ + u8 wanted_flow; /* Flow we want for 802.3x flow control */ + u32 registry_jumbo_packet; /* Max supported ethernet packet size */ + + /* Derived from the registry: */ + u8 flow; /* flow control validated by the far-end */ + + /* Minimize init-time */ + struct timer_list error_timer; + + /* variable putting the phy into coma mode when boot up with no cable + * plugged in after 5 seconds + */ + u8 boot_coma; + + /* Tx Memory Variables */ + struct tx_ring tx_ring; + + /* Rx Memory Variables */ + struct rx_ring rx_ring; + + struct ce_stats stats; +}; + +static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) +{ + u32 reg; + int i; + + /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and + * bits 7,1:0 both equal to 1, at least once after reset. + * Subsequent operations need only to check that bits 1:0 are equal + * to 1 prior to starting a single byte read/write + */ + for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { + if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, ®)) + return -EIO; + + /* I2C idle and Phy Queue Avail both true */ + if ((reg & 0x3000) == 0x3000) { + if (status) + *status = reg; + return reg & 0xFF; + } + } + return -ETIMEDOUT; +} + +static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) +{ + struct pci_dev *pdev = adapter->pdev; + int index = 0; + int retries; + int err = 0; + int writeok = 0; + u32 status; + u32 val = 0; + + /* For an EEPROM, an I2C single byte write is defined as a START + * condition followed by the device address, EEPROM address, one byte + * of data and a STOP condition. 
The STOP condition will trigger the + * EEPROM's internally timed write cycle to the nonvolatile memory. + * All inputs are disabled during this write cycle and the EEPROM will + * not respond to any access until the internal write is complete. + */ + err = eeprom_wait_ready(pdev, NULL); + if (err < 0) + return err; + + /* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0, + * and bits 1:0 both =0. Bit 5 should be set according to the + * type of EEPROM being accessed (1=two byte addressing, 0=one + * byte addressing). + */ + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, + LBCIF_CONTROL_LBCIF_ENABLE | + LBCIF_CONTROL_I2C_WRITE)) + return -EIO; + + /* Prepare EEPROM address for Step 3 */ + for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) { + if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) + break; + /* Write the data to the LBCIF Data Register (the I2C write + * will begin). + */ + if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data)) + break; + /* Monitor bit 1:0 of the LBCIF Status Register. When bits + * 1:0 are both equal to 1, the I2C write has completed and the + * internal write cycle of the EEPROM is about to start. + * (bits 1:0 = 01 is a legal state while waiting from both + * equal to 1, but bits 1:0 = 10 is invalid and implies that + * something is broken). + */ + err = eeprom_wait_ready(pdev, &status); + if (err < 0) + return 0; + + /* Check bit 3 of the LBCIF Status Register. If equal to 1, + * an error has occurred.Don't break here if we are revision + * 1, this is so we do a blind write for load bug. + */ + if ((status & LBCIF_STATUS_GENERAL_ERROR) && + adapter->pdev->revision == 0) + break; + + /* Check bit 2 of the LBCIF Status Register. If equal to 1 an + * ACK error has occurred on the address phase of the write. + * This could be due to an actual hardware failure or the + * EEPROM may still be in its internal write cycle from a + * previous write. This write operation was ignored and must be + *repeated later. + */ + if (status & LBCIF_STATUS_ACK_ERROR) { + /* This could be due to an actual hardware failure + * or the EEPROM may still be in its internal write + * cycle from a previous write. This write operation + * was ignored and must be repeated later. + */ + udelay(10); + continue; + } + + writeok = 1; + break; + } + + udelay(10); + + while (1) { + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, + LBCIF_CONTROL_LBCIF_ENABLE)) + writeok = 0; + + /* Do read until internal ACK_ERROR goes away meaning write + * completed + */ + do { + pci_write_config_dword(pdev, + LBCIF_ADDRESS_REGISTER, + addr); + do { + pci_read_config_dword(pdev, + LBCIF_DATA_REGISTER, + &val); + } while ((val & 0x00010000) == 0); + } while (val & 0x00040000); + + if ((val & 0xFF00) != 0xC000 || index == 10000) + break; + index++; + } + return writeok ? 0 : -EIO; +} + +static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + u32 status; + + /* A single byte read is similar to the single byte write, with the + * exception of the data flow: + */ + err = eeprom_wait_ready(pdev, NULL); + if (err < 0) + return err; + /* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0, + * and bits 1:0 both =0. Bit 5 should be set according to the type + * of EEPROM being accessed (1=two byte addressing, 0=one byte + * addressing). 
+ */ + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, + LBCIF_CONTROL_LBCIF_ENABLE)) + return -EIO; + /* Write the address to the LBCIF Address Register (I2C read will + * begin). + */ + if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) + return -EIO; + /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read + * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure + * has occurred). + */ + err = eeprom_wait_ready(pdev, &status); + if (err < 0) + return err; + /* Regardless of error status, read data byte from LBCIF Data + * Register. + */ + *pdata = err; + + return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0; +} + +static int et131x_init_eeprom(struct et131x_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u8 eestatus; + + pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus); + + /* THIS IS A WORKAROUND: + * I need to call this function twice to get my card in a + * LG M1 Express Dual running. I tried also a msleep before this + * function, because I thought there could be some time conditions + * but it didn't work. Call the whole function twice also work. + */ + if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { + dev_err(&pdev->dev, + "Could not read PCI config space for EEPROM Status\n"); + return -EIO; + } + + /* Determine if the error(s) we care about are present. If they are + * present we need to fail. + */ + if (eestatus & 0x4C) { + int write_failed = 0; + + if (pdev->revision == 0x01) { + int i; + static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; + + /* Re-write the first 4 bytes if we have an eeprom + * present and the revision id is 1, this fixes the + * corruption seen with 1310 B Silicon + */ + for (i = 0; i < 3; i++) + if (eeprom_write(adapter, i, eedata[i]) < 0) + write_failed = 1; + } + if (pdev->revision != 0x01 || write_failed) { + dev_err(&pdev->dev, + "Fatal EEPROM Status Error - 0x%04x\n", + eestatus); + + /* This error could mean that there was an error + * reading the eeprom or that the eeprom doesn't exist. + * We will treat each case the same and not try to + * gather additional information that normally would + * come from the eeprom, like MAC Address + */ + adapter->has_eeprom = 0; + return -EIO; + } + } + adapter->has_eeprom = 1; + + /* Read the EEPROM for information regarding LED behavior. Refer to + * et131x_xcvr_init() for its use. 
+ */ + eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]); + eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]); + + if (adapter->eeprom_data[0] != 0xcd) + /* Disable all optional features */ + adapter->eeprom_data[1] = 0x00; + + return 0; +} + +static void et131x_rx_dma_enable(struct et131x_adapter *adapter) +{ + /* Setup the receive dma configuration register for normal operation */ + u32 csr = ET_RXDMA_CSR_FBR1_ENABLE; + struct rx_ring *rx_ring = &adapter->rx_ring; + + if (rx_ring->fbr[1]->buffsize == 4096) + csr |= ET_RXDMA_CSR_FBR1_SIZE_LO; + else if (rx_ring->fbr[1]->buffsize == 8192) + csr |= ET_RXDMA_CSR_FBR1_SIZE_HI; + else if (rx_ring->fbr[1]->buffsize == 16384) + csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI; + + csr |= ET_RXDMA_CSR_FBR0_ENABLE; + if (rx_ring->fbr[0]->buffsize == 256) + csr |= ET_RXDMA_CSR_FBR0_SIZE_LO; + else if (rx_ring->fbr[0]->buffsize == 512) + csr |= ET_RXDMA_CSR_FBR0_SIZE_HI; + else if (rx_ring->fbr[0]->buffsize == 1024) + csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI; + writel(csr, &adapter->regs->rxdma.csr); + + csr = readl(&adapter->regs->rxdma.csr); + if (csr & ET_RXDMA_CSR_HALT_STATUS) { + udelay(5); + csr = readl(&adapter->regs->rxdma.csr); + if (csr & ET_RXDMA_CSR_HALT_STATUS) { + dev_err(&adapter->pdev->dev, + "RX Dma failed to exit halt state. CSR 0x%08x\n", + csr); + } + } +} + +static void et131x_rx_dma_disable(struct et131x_adapter *adapter) +{ + u32 csr; + /* Setup the receive dma configuration register */ + writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE, + &adapter->regs->rxdma.csr); + csr = readl(&adapter->regs->rxdma.csr); + if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) { + udelay(5); + csr = readl(&adapter->regs->rxdma.csr); + if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) + dev_err(&adapter->pdev->dev, + "RX Dma failed to enter halt state. CSR 0x%08x\n", + csr); + } +} + +static void et131x_tx_dma_enable(struct et131x_adapter *adapter) +{ + /* Setup the transmit dma configuration register for normal + * operation + */ + writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT), + &adapter->regs->txdma.csr); +} + +static inline void add_10bit(u32 *v, int n) +{ + *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); +} + +static inline void add_12bit(u32 *v, int n) +{ + *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP); +} + +static void et1310_config_mac_regs1(struct et131x_adapter *adapter) +{ + struct mac_regs __iomem *macregs = &adapter->regs->mac; + u32 station1; + u32 station2; + u32 ipg; + + /* First we need to reset everything. Write to MAC configuration + * register 1 to perform reset. + */ + writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | + ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | + ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC, + ¯egs->cfg1); + + /* Next lets configure the MAC Inter-packet gap register */ + ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */ + ipg |= 0x50 << 8; /* ifg enforce 0x50 */ + writel(ipg, ¯egs->ipg); + + /* Next lets configure the MAC Half Duplex register */ + /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */ + writel(0x00A1F037, ¯egs->hfdp); + + /* Next lets configure the MAC Interface Control register */ + writel(0, ¯egs->if_ctrl); + + writel(ET_MAC_MIIMGMT_CLK_RST, ¯egs->mii_mgmt_cfg); + + /* Next lets configure the MAC Station Address register. These + * values are read from the EEPROM during initialization and stored + * in the adapter structure. We write what is stored in the adapter + * structure to the MAC Station Address registers high and low. 
This + * station address is used for generating and checking pause control + * packets. + */ + station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) | + (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT); + station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) | + (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) | + (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) | + adapter->addr[2]; + writel(station1, ¯egs->station_addr_1); + writel(station2, ¯egs->station_addr_2); + + /* Max ethernet packet in bytes that will be passed by the mac without + * being truncated. Allow the MAC to pass 4 more than our max packet + * size. This is 4 for the Ethernet CRC. + * + * Packets larger than (registry_jumbo_packet) that do not contain a + * VLAN ID will be dropped by the Rx function. + */ + writel(adapter->registry_jumbo_packet + 4, ¯egs->max_fm_len); + + /* clear out MAC config reset */ + writel(0, ¯egs->cfg1); +} + +static void et1310_config_mac_regs2(struct et131x_adapter *adapter) +{ + int32_t delay = 0; + struct mac_regs __iomem *mac = &adapter->regs->mac; + struct phy_device *phydev = adapter->phydev; + u32 cfg1; + u32 cfg2; + u32 ifctrl; + u32 ctl; + + ctl = readl(&adapter->regs->txmac.ctl); + cfg1 = readl(&mac->cfg1); + cfg2 = readl(&mac->cfg2); + ifctrl = readl(&mac->if_ctrl); + + /* Set up the if mode bits */ + cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK; + if (phydev->speed == SPEED_1000) { + cfg2 |= ET_MAC_CFG2_IFMODE_1000; + ifctrl &= ~ET_MAC_IFCTRL_PHYMODE; + } else { + cfg2 |= ET_MAC_CFG2_IFMODE_100; + ifctrl |= ET_MAC_IFCTRL_PHYMODE; + } + + cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE | + ET_MAC_CFG1_TX_FLOW; + + cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW); + if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH) + cfg1 |= ET_MAC_CFG1_RX_FLOW; + writel(cfg1, &mac->cfg1); + + /* Now we need to initialize the MAC Configuration 2 register */ + /* preamble 7, check length, huge frame off, pad crc, crc enable + * full duplex off + */ + cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT; + cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK; + cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC; + cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE; + cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME; + cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX; + + if (phydev->duplex == DUPLEX_FULL) + cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX; + + ifctrl &= ~ET_MAC_IFCTRL_GHDMODE; + if (phydev->duplex == DUPLEX_HALF) + ifctrl |= ET_MAC_IFCTRL_GHDMODE; + + writel(ifctrl, &mac->if_ctrl); + writel(cfg2, &mac->cfg2); + + do { + udelay(10); + delay++; + cfg1 = readl(&mac->cfg1); + } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100); + + if (delay == 100) { + dev_warn(&adapter->pdev->dev, + "Syncd bits did not respond correctly cfg1 word 0x%08x\n", + cfg1); + } + + ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE; + writel(ctl, &adapter->regs->txmac.ctl); + + if (adapter->flags & FMP_ADAPTER_LOWER_POWER) { + et131x_rx_dma_enable(adapter); + et131x_tx_dma_enable(adapter); + } +} + +static int et1310_in_phy_coma(struct et131x_adapter *adapter) +{ + u32 pmcsr = readl(&adapter->regs->global.pm_csr); + + return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; +} + +static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) +{ + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; + u32 hash1 = 0; + u32 hash2 = 0; + u32 hash3 = 0; + u32 hash4 = 0; + u32 pm_csr; + + /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision + * the multi-cast LIST. 
If it is NOT specified, (and "ALL" is not + * specified) then we should pass NO multi-cast addresses to the + * driver. + */ + if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { + int i; + + /* Loop through our multicast array and set up the device */ + for (i = 0; i < adapter->multicast_addr_count; i++) { + u32 result; + + result = ether_crc(6, adapter->multicast_list[i]); + + result = (result & 0x3F800000) >> 23; + + if (result < 32) { + hash1 |= (1 << result); + } else if ((31 < result) && (result < 64)) { + result -= 32; + hash2 |= (1 << result); + } else if ((63 < result) && (result < 96)) { + result -= 64; + hash3 |= (1 << result); + } else { + result -= 96; + hash4 |= (1 << result); + } + } + } + + /* Write out the new hash to the device */ + pm_csr = readl(&adapter->regs->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) { + writel(hash1, &rxmac->multi_hash1); + writel(hash2, &rxmac->multi_hash2); + writel(hash3, &rxmac->multi_hash3); + writel(hash4, &rxmac->multi_hash4); + } +} + +static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) +{ + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; + u32 uni_pf1; + u32 uni_pf2; + u32 uni_pf3; + u32 pm_csr; + + /* Set up unicast packet filter reg 3 to be the first two octets of + * the MAC address for both address + * + * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the + * MAC address for second address + * + * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the + * MAC address for first address + */ + uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) | + (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) | + (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) | + adapter->addr[1]; + + uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) | + (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) | + (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) | + adapter->addr[5]; + + uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) | + (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) | + (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) | + adapter->addr[5]; + + pm_csr = readl(&adapter->regs->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) { + writel(uni_pf1, &rxmac->uni_pf_addr1); + writel(uni_pf2, &rxmac->uni_pf_addr2); + writel(uni_pf3, &rxmac->uni_pf_addr3); + } +} + +static void et1310_config_rxmac_regs(struct et131x_adapter *adapter) +{ + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; + struct phy_device *phydev = adapter->phydev; + u32 sa_lo; + u32 sa_hi = 0; + u32 pf_ctrl = 0; + u32 __iomem *wolw; + + /* Disable the MAC while it is being configured (also disable WOL) */ + writel(0x8, &rxmac->ctrl); + + /* Initialize WOL to disabled. */ + writel(0, &rxmac->crc0); + writel(0, &rxmac->crc12); + writel(0, &rxmac->crc34); + + /* We need to set the WOL mask0 - mask4 next. We initialize it to + * its default Values of 0x00000000 because there are not WOL masks + * as of this time. 
+ */ + for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++) + writel(0, wolw); + + /* Lets setup the WOL Source Address */ + sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) | + (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) | + (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) | + adapter->addr[5]; + writel(sa_lo, &rxmac->sa_lo); + + sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) | + adapter->addr[1]; + writel(sa_hi, &rxmac->sa_hi); + + /* Disable all Packet Filtering */ + writel(0, &rxmac->pf_ctrl); + + /* Let's initialize the Unicast Packet filtering address */ + if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) { + et1310_setup_device_for_unicast(adapter); + pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE; + } else { + writel(0, &rxmac->uni_pf_addr1); + writel(0, &rxmac->uni_pf_addr2); + writel(0, &rxmac->uni_pf_addr3); + } + + /* Let's initialize the Multicast hash */ + if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { + pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE; + et1310_setup_device_for_multicast(adapter); + } + + /* Runt packet filtering. Didn't work in version A silicon. */ + pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT; + pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE; + + if (adapter->registry_jumbo_packet > 8192) + /* In order to transmit jumbo packets greater than 8k, the + * FIFO between RxMAC and RxDMA needs to be reduced in size + * to (16k - Jumbo packet size). In order to implement this, + * we must use "cut through" mode in the RxMAC, which chops + * packets down into segments which are (max_size * 16). In + * this case we selected 256 bytes, since this is the size of + * the PCI-Express TLP's that the 1310 uses. + * + * seg_en on, fc_en off, size 0x10 + */ + writel(0x41, &rxmac->mcif_ctrl_max_seg); + else + writel(0, &rxmac->mcif_ctrl_max_seg); + + writel(0, &rxmac->mcif_water_mark); + writel(0, &rxmac->mif_ctrl); + writel(0, &rxmac->space_avail); + + /* Initialize the the mif_ctrl register + * bit 3: Receive code error. One or more nibbles were signaled as + * errors during the reception of the packet. Clear this + * bit in Gigabit, set it in 100Mbit. This was derived + * experimentally at UNH. + * bit 4: Receive CRC error. The packet's CRC did not match the + * internally generated CRC. + * bit 5: Receive length check error. Indicates that frame length + * field value in the packet does not match the actual data + * byte length and is not a type field. + * bit 16: Receive frame truncated. + * bit 17: Drop packet enable + */ + if (phydev && phydev->speed == SPEED_100) + writel(0x30038, &rxmac->mif_ctrl); + else + writel(0x30030, &rxmac->mif_ctrl); + + /* Finally we initialize RxMac to be enabled & WOL disabled. Packet + * filter is always enabled since it is where the runt packets are + * supposed to be dropped. For version A silicon, runt packet + * dropping doesn't work, so it is disabled in the pf_ctrl register, + * but we still leave the packet filter on. 
+ */ + writel(pf_ctrl, &rxmac->pf_ctrl); + writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl); +} + +static void et1310_config_txmac_regs(struct et131x_adapter *adapter) +{ + struct txmac_regs __iomem *txmac = &adapter->regs->txmac; + + /* We need to update the Control Frame Parameters + * cfpt - control frame pause timer set to 64 (0x40) + * cfep - control frame extended pause timer set to 0x0 + */ + if (adapter->flow == FLOW_NONE) + writel(0, &txmac->cf_param); + else + writel(0x40, &txmac->cf_param); +} + +static void et1310_config_macstat_regs(struct et131x_adapter *adapter) +{ + struct macstat_regs __iomem *macstat = &adapter->regs->macstat; + u32 __iomem *reg; + + /* initialize all the macstat registers to zero on the device */ + for (reg = &macstat->txrx_0_64_byte_frames; + reg <= &macstat->carry_reg2; reg++) + writel(0, reg); + + /* Unmask any counters that we want to track the overflow of. + * Initially this will be all counters. It may become clear later + * that we do not need to track all counters. + */ + writel(0xFFFFBE32, &macstat->carry_reg1_mask); + writel(0xFFFE7E8B, &macstat->carry_reg2_mask); +} + +static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, + u8 reg, u16 *value) +{ + struct mac_regs __iomem *mac = &adapter->regs->mac; + int status = 0; + u32 delay = 0; + u32 mii_addr; + u32 mii_cmd; + u32 mii_indicator; + + /* Save a local copy of the registers we are dealing with so we can + * set them back + */ + mii_addr = readl(&mac->mii_mgmt_addr); + mii_cmd = readl(&mac->mii_mgmt_cmd); + + /* Stop the current operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* Set up the register we need to read from on the correct PHY */ + writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); + + writel(0x1, &mac->mii_mgmt_cmd); + + do { + udelay(50); + delay++; + mii_indicator = readl(&mac->mii_mgmt_indicator); + } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50); + + /* If we hit the max delay, we could not read the register */ + if (delay == 50) { + dev_warn(&adapter->pdev->dev, + "reg 0x%08x could not be read\n", reg); + dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", + mii_indicator); + + status = -EIO; + goto out; + } + + /* If we hit here we were able to read the register and we need to + * return the value to the caller + */ + *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK; + +out: + /* Stop the read operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* set the registers we touched back to the state at which we entered + * this function + */ + writel(mii_addr, &mac->mii_mgmt_addr); + writel(mii_cmd, &mac->mii_mgmt_cmd); + + return status; +} + +static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) +{ + struct phy_device *phydev = adapter->phydev; + + if (!phydev) + return -EIO; + + return et131x_phy_mii_read(adapter, phydev->addr, reg, value); +} + +static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, + u16 value) +{ + struct mac_regs __iomem *mac = &adapter->regs->mac; + int status = 0; + u32 delay = 0; + u32 mii_addr; + u32 mii_cmd; + u32 mii_indicator; + + /* Save a local copy of the registers we are dealing with so we can + * set them back + */ + mii_addr = readl(&mac->mii_mgmt_addr); + mii_cmd = readl(&mac->mii_mgmt_cmd); + + /* Stop the current operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* Set up the register we need to write to on the correct PHY */ + writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); + + /* Add the value to write to 
the registers to the mac */ + writel(value, &mac->mii_mgmt_ctrl); + + do { + udelay(50); + delay++; + mii_indicator = readl(&mac->mii_mgmt_indicator); + } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100); + + /* If we hit the max delay, we could not write the register */ + if (delay == 100) { + u16 tmp; + + dev_warn(&adapter->pdev->dev, + "reg 0x%08x could not be written", reg); + dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", + mii_indicator); + dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", + readl(&mac->mii_mgmt_cmd)); + + et131x_mii_read(adapter, reg, &tmp); + + status = -EIO; + } + /* Stop the write operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* set the registers we touched back to the state at which we entered + * this function + */ + writel(mii_addr, &mac->mii_mgmt_addr); + writel(mii_cmd, &mac->mii_mgmt_cmd); + + return status; +} + +static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter, + u16 regnum, + u16 bitnum, + u8 *value) +{ + u16 reg; + u16 mask = 1 << bitnum; + + et131x_mii_read(adapter, regnum, ®); + + *value = (reg & mask) >> bitnum; +} + +static void et1310_config_flow_control(struct et131x_adapter *adapter) +{ + struct phy_device *phydev = adapter->phydev; + + if (phydev->duplex == DUPLEX_HALF) { + adapter->flow = FLOW_NONE; + } else { + char remote_pause, remote_async_pause; + + et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause); + et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause); + + if (remote_pause && remote_async_pause) { + adapter->flow = adapter->wanted_flow; + } else if (remote_pause && !remote_async_pause) { + if (adapter->wanted_flow == FLOW_BOTH) + adapter->flow = FLOW_BOTH; + else + adapter->flow = FLOW_NONE; + } else if (!remote_pause && !remote_async_pause) { + adapter->flow = FLOW_NONE; + } else { + if (adapter->wanted_flow == FLOW_BOTH) + adapter->flow = FLOW_RXONLY; + else + adapter->flow = FLOW_NONE; + } + } +} + +/* et1310_update_macstat_host_counters - Update local copy of the statistics */ +static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) +{ + struct ce_stats *stats = &adapter->stats; + struct macstat_regs __iomem *macstat = + &adapter->regs->macstat; + + stats->tx_collisions += readl(&macstat->tx_total_collisions); + stats->tx_first_collisions += readl(&macstat->tx_single_collisions); + stats->tx_deferred += readl(&macstat->tx_deferred); + stats->tx_excessive_collisions += + readl(&macstat->tx_multiple_collisions); + stats->tx_late_collisions += readl(&macstat->tx_late_collisions); + stats->tx_underflows += readl(&macstat->tx_undersize_frames); + stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); + + stats->rx_align_errs += readl(&macstat->rx_align_errs); + stats->rx_crc_errs += readl(&macstat->rx_code_errs); + stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); + stats->rx_overflows += readl(&macstat->rx_oversize_packets); + stats->rx_code_violations += readl(&macstat->rx_fcs_errs); + stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); + stats->rx_other_errs += readl(&macstat->rx_fragment_packets); +} + +/* et1310_handle_macstat_interrupt + * + * One of the MACSTAT counters has wrapped. Update the local copy of + * the statistics held in the adapter structure, checking the "wrap" + * bit for each counter. + */ +static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) +{ + u32 carry_reg1; + u32 carry_reg2; + + /* Read the interrupt bits from the register(s). These are Clear On + * Write. 
+ */ + carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); + carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); + + writel(carry_reg1, &adapter->regs->macstat.carry_reg1); + writel(carry_reg2, &adapter->regs->macstat.carry_reg2); + + /* We need to do update the host copy of all the MAC_STAT counters. + * For each counter, check it's overflow bit. If the overflow bit is + * set, then increment the host version of the count by one complete + * revolution of the counter. This routine is called when the counter + * block indicates that one of the counters has wrapped. + */ + if (carry_reg1 & (1 << 14)) + adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 8)) + adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; + if (carry_reg1 & (1 << 7)) + adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 2)) + adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 6)) + adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 3)) + adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 0)) + adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; + if (carry_reg2 & (1 << 16)) + adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 15)) + adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 6)) + adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 8)) + adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 5)) + adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 4)) + adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 2)) + adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; +} + +static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) +{ + struct net_device *netdev = bus->priv; + struct et131x_adapter *adapter = netdev_priv(netdev); + u16 value; + int ret; + + ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); + + if (ret < 0) + return ret; + + return value; +} + +static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, + int reg, u16 value) +{ + struct net_device *netdev = bus->priv; + struct et131x_adapter *adapter = netdev_priv(netdev); + + return et131x_mii_write(adapter, phy_addr, reg, value); +} + +/* et1310_phy_power_switch - PHY power control + * @adapter: device to control + * @down: true for off/false for back on + * + * one hundred, ten, one thousand megs + * How would you like to have your LAN accessed + * Can't you see that this code processed + * Phy power, phy power.. + */ +static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) +{ + u16 data; + struct phy_device *phydev = adapter->phydev; + + et131x_mii_read(adapter, MII_BMCR, &data); + data &= ~BMCR_PDOWN; + if (down) + data |= BMCR_PDOWN; + et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); +} + +/* et131x_xcvr_init - Init the phy if we are setting it into force mode */ +static void et131x_xcvr_init(struct et131x_adapter *adapter) +{ + u16 lcr2; + struct phy_device *phydev = adapter->phydev; + + /* Set the LED behavior such that LED 1 indicates speed (off = + * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates + * link and activity (on for link, blink off for activity). + * + * NOTE: Some customizations have been added here for specific + * vendors; The LED behavior is now determined by vendor data in the + * EEPROM. However, the above description is the default. 
+ */ + if ((adapter->eeprom_data[1] & 0x4) == 0) { + et131x_mii_read(adapter, PHY_LED_2, &lcr2); + + lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T); + lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); + + if ((adapter->eeprom_data[1] & 0x8) == 0) + lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); + else + lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); + + et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); + } +} + +/* et131x_configure_global_regs - configure JAGCore global regs */ +static void et131x_configure_global_regs(struct et131x_adapter *adapter) +{ + struct global_regs __iomem *regs = &adapter->regs->global; + + writel(0, ®s->rxq_start_addr); + writel(INTERNAL_MEM_SIZE - 1, ®s->txq_end_addr); + + if (adapter->registry_jumbo_packet < 2048) { + /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word + * block of RAM that the driver can split between Tx + * and Rx as it desires. Our default is to split it + * 50/50: + */ + writel(PARM_RX_MEM_END_DEF, ®s->rxq_end_addr); + writel(PARM_RX_MEM_END_DEF + 1, ®s->txq_start_addr); + } else if (adapter->registry_jumbo_packet < 8192) { + /* For jumbo packets > 2k but < 8k, split 50-50. */ + writel(INTERNAL_MEM_RX_OFFSET, ®s->rxq_end_addr); + writel(INTERNAL_MEM_RX_OFFSET + 1, ®s->txq_start_addr); + } else { + /* 9216 is the only packet size greater than 8k that + * is available. The Tx buffer has to be big enough + * for one whole packet on the Tx side. We'll make + * the Tx 9408, and give the rest to Rx + */ + writel(0x01b3, ®s->rxq_end_addr); + writel(0x01b4, ®s->txq_start_addr); + } + + /* Initialize the loopback register. Disable all loopbacks. */ + writel(0, ®s->loopback); + + writel(0, ®s->msi_config); + + /* By default, disable the watchdog timer. It will be enabled when + * a packet is queued. 
+ */ + writel(0, ®s->watchdog_timer); +} + +/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */ +static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) +{ + struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; + struct rx_ring *rx_local = &adapter->rx_ring; + struct fbr_desc *fbr_entry; + u32 entry; + u32 psr_num_des; + unsigned long flags; + u8 id; + + et131x_rx_dma_disable(adapter); + + /* Load the completion writeback physical address */ + writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi); + writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo); + + memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); + + /* Set the address and parameters of the packet status ring */ + writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi); + writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo); + writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des); + writel(0, &rx_dma->psr_full_offset); + + psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK; + writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, + &rx_dma->psr_min_des); + + spin_lock_irqsave(&adapter->rcv_lock, flags); + + /* These local variables track the PSR in the adapter structure */ + rx_local->local_psr_full = 0; + + for (id = 0; id < NUM_FBRS; id++) { + u32 __iomem *num_des; + u32 __iomem *full_offset; + u32 __iomem *min_des; + u32 __iomem *base_hi; + u32 __iomem *base_lo; + struct fbr_lookup *fbr = rx_local->fbr[id]; + + if (id == 0) { + num_des = &rx_dma->fbr0_num_des; + full_offset = &rx_dma->fbr0_full_offset; + min_des = &rx_dma->fbr0_min_des; + base_hi = &rx_dma->fbr0_base_hi; + base_lo = &rx_dma->fbr0_base_lo; + } else { + num_des = &rx_dma->fbr1_num_des; + full_offset = &rx_dma->fbr1_full_offset; + min_des = &rx_dma->fbr1_min_des; + base_hi = &rx_dma->fbr1_base_hi; + base_lo = &rx_dma->fbr1_base_lo; + } + + /* Now's the best time to initialize FBR contents */ + fbr_entry = fbr->ring_virtaddr; + for (entry = 0; entry < fbr->num_entries; entry++) { + fbr_entry->addr_hi = fbr->bus_high[entry]; + fbr_entry->addr_lo = fbr->bus_low[entry]; + fbr_entry->word2 = entry; + fbr_entry++; + } + + /* Set the address and parameters of Free buffer ring 1 and 0 */ + writel(upper_32_bits(fbr->ring_physaddr), base_hi); + writel(lower_32_bits(fbr->ring_physaddr), base_lo); + writel(fbr->num_entries - 1, num_des); + writel(ET_DMA10_WRAP, full_offset); + + /* This variable tracks the free buffer ring 1 full position, + * so it has to match the above. + */ + fbr->local_full = ET_DMA10_WRAP; + writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, + min_des); + } + + /* Program the number of packets we will receive before generating an + * interrupt. + * For version B silicon, this value gets updated once autoneg is + *complete. + */ + writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); + + /* The "time_done" is not working correctly to coalesce interrupts + * after a given time period, but rather is giving us an interrupt + * regardless of whether we have received packets. + * This value gets updated once autoneg is complete. + */ + writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); + + spin_unlock_irqrestore(&adapter->rcv_lock, flags); +} + +/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. + * + * Configure the transmit engine with the ring buffers we have created + * and prepare it for use. 
+ */ +static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) +{ + struct txdma_regs __iomem *txdma = &adapter->regs->txdma; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Load the hardware with the start of the transmit descriptor ring. */ + writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi); + writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo); + + /* Initialise the transmit DMA engine */ + writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); + + /* Load the completion writeback physical address */ + writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi); + writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo); + + *tx_ring->tx_status = 0; + + writel(0, &txdma->service_request); + tx_ring->send_idx = 0; +} + +/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */ +static void et131x_adapter_setup(struct et131x_adapter *adapter) +{ + et131x_configure_global_regs(adapter); + et1310_config_mac_regs1(adapter); + + /* Configure the MMC registers */ + /* All we need to do is initialize the Memory Control Register */ + writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); + + et1310_config_rxmac_regs(adapter); + et1310_config_txmac_regs(adapter); + + et131x_config_rx_dma_regs(adapter); + et131x_config_tx_dma_regs(adapter); + + et1310_config_macstat_regs(adapter); + + et1310_phy_power_switch(adapter, 0); + et131x_xcvr_init(adapter); +} + +/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */ +static void et131x_soft_reset(struct et131x_adapter *adapter) +{ + u32 reg; + + /* Disable MAC Core */ + reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | + ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | + ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; + writel(reg, &adapter->regs->mac.cfg1); + + reg = ET_RESET_ALL; + writel(reg, &adapter->regs->global.sw_reset); + + reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | + ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; + writel(reg, &adapter->regs->mac.cfg1); + writel(0, &adapter->regs->mac.cfg1); +} + +static void et131x_enable_interrupts(struct et131x_adapter *adapter) +{ + u32 mask; + + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) + mask = INT_MASK_ENABLE; + else + mask = INT_MASK_ENABLE_NO_FLOW; + + writel(mask, &adapter->regs->global.int_mask); +} + +static void et131x_disable_interrupts(struct et131x_adapter *adapter) +{ + writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); +} + +static void et131x_tx_dma_disable(struct et131x_adapter *adapter) +{ + /* Setup the transmit dma configuration register */ + writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT, + &adapter->regs->txdma.csr); +} + +static void et131x_enable_txrx(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_rx_dma_enable(adapter); + et131x_tx_dma_enable(adapter); + + if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE) + et131x_enable_interrupts(adapter); + + netif_start_queue(netdev); +} + +static void et131x_disable_txrx(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + netif_stop_queue(netdev); + + et131x_rx_dma_disable(adapter); + et131x_tx_dma_disable(adapter); + + et131x_disable_interrupts(adapter); +} + +static void et131x_init_send(struct et131x_adapter *adapter) +{ + int i; + struct tx_ring *tx_ring = &adapter->tx_ring; + struct tcb *tcb = tx_ring->tcb_ring; + + tx_ring->tcb_qhead = tcb; + + memset(tcb, 0, sizeof(struct tcb) * 
NUM_TCB); + + for (i = 0; i < NUM_TCB; i++) { + tcb->next = tcb + 1; + tcb++; + } + + tcb--; + tx_ring->tcb_qtail = tcb; + tcb->next = NULL; + /* Curr send queue should now be empty */ + tx_ring->send_head = NULL; + tx_ring->send_tail = NULL; +} + +/* et1310_enable_phy_coma + * + * driver receive an phy status change interrupt while in D0 and check that + * phy_status is down. + * + * -- gate off JAGCore; + * -- set gigE PHY in Coma mode + * -- wake on phy_interrupt; Perform software reset JAGCore, + * re-initialize jagcore and gigE PHY + */ +static void et1310_enable_phy_coma(struct et131x_adapter *adapter) +{ + u32 pmcsr = readl(&adapter->regs->global.pm_csr); + + /* Stop sending packets. */ + adapter->flags |= FMP_ADAPTER_LOWER_POWER; + + /* Wait for outstanding Receive packets */ + et131x_disable_txrx(adapter->netdev); + + /* Gate off JAGCore 3 clock domains */ + pmcsr &= ~ET_PMCSR_INIT; + writel(pmcsr, &adapter->regs->global.pm_csr); + + /* Program gigE PHY in to Coma mode */ + pmcsr |= ET_PM_PHY_SW_COMA; + writel(pmcsr, &adapter->regs->global.pm_csr); +} + +static void et1310_disable_phy_coma(struct et131x_adapter *adapter) +{ + u32 pmcsr; + + pmcsr = readl(&adapter->regs->global.pm_csr); + + /* Disable phy_sw_coma register and re-enable JAGCore clocks */ + pmcsr |= ET_PMCSR_INIT; + pmcsr &= ~ET_PM_PHY_SW_COMA; + writel(pmcsr, &adapter->regs->global.pm_csr); + + /* Restore the GbE PHY speed and duplex modes; + * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY + */ + + /* Re-initialize the send structures */ + et131x_init_send(adapter); + + /* Bring the device back to the state it was during init prior to + * autonegotiation being complete. This way, when we get the auto-neg + * complete interrupt, we can complete init by calling ConfigMacREGS2. + */ + et131x_soft_reset(adapter); + + et131x_adapter_setup(adapter); + + /* Allow Tx to restart */ + adapter->flags &= ~FMP_ADAPTER_LOWER_POWER; + + et131x_enable_txrx(adapter->netdev); +} + +static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) +{ + u32 tmp_free_buff_ring = *free_buff_ring; + + tmp_free_buff_ring++; + /* This works for all cases where limit < 1024. The 1023 case + * works because 1023++ is 1024 which means the if condition is not + * taken but the carry of the bit into the wrap bit toggles the wrap + * value correctly + */ + if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { + tmp_free_buff_ring &= ~ET_DMA10_MASK; + tmp_free_buff_ring ^= ET_DMA10_WRAP; + } + /* For the 1023 case */ + tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP); + *free_buff_ring = tmp_free_buff_ring; + return tmp_free_buff_ring; +} + +/* et131x_rx_dma_memory_alloc + * + * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, + * and the Packet Status Ring. + */ +static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) +{ + u8 id; + u32 i, j; + u32 bufsize; + u32 psr_size; + u32 fbr_chunksize; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct fbr_lookup *fbr; + + /* Alloc memory for the lookup table */ + rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL); + if (rx_ring->fbr[0] == NULL) + return -ENOMEM; + rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL); + if (rx_ring->fbr[1] == NULL) + return -ENOMEM; + + /* The first thing we will do is configure the sizes of the buffer + * rings. These will change based on jumbo packet support. 
Larger + * jumbo packets increases the size of each entry in FBR0, and the + * number of entries in FBR0, while at the same time decreasing the + * number of entries in FBR1. + * + * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 + * entries are huge in order to accommodate a "jumbo" frame, then it + * will have less entries. Conversely, FBR1 will now be relied upon + * to carry more "normal" frames, thus it's entry size also increases + * and the number of entries goes up too (since it now carries + * "small" + "regular" packets. + * + * In this scheme, we try to maintain 512 entries between the two + * rings. Also, FBR1 remains a constant size - when it's size doubles + * the number of entries halves. FBR0 increases in size, however. + */ + if (adapter->registry_jumbo_packet < 2048) { + rx_ring->fbr[0]->buffsize = 256; + rx_ring->fbr[0]->num_entries = 512; + rx_ring->fbr[1]->buffsize = 2048; + rx_ring->fbr[1]->num_entries = 512; + } else if (adapter->registry_jumbo_packet < 4096) { + rx_ring->fbr[0]->buffsize = 512; + rx_ring->fbr[0]->num_entries = 1024; + rx_ring->fbr[1]->buffsize = 4096; + rx_ring->fbr[1]->num_entries = 512; + } else { + rx_ring->fbr[0]->buffsize = 1024; + rx_ring->fbr[0]->num_entries = 768; + rx_ring->fbr[1]->buffsize = 16384; + rx_ring->fbr[1]->num_entries = 128; + } + + rx_ring->psr_entries = rx_ring->fbr[0]->num_entries + + rx_ring->fbr[1]->num_entries; + + for (id = 0; id < NUM_FBRS; id++) { + fbr = rx_ring->fbr[id]; + /* Allocate an area of memory for Free Buffer Ring */ + bufsize = sizeof(struct fbr_desc) * fbr->num_entries; + fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, + bufsize, + &fbr->ring_physaddr, + GFP_KERNEL); + if (!fbr->ring_virtaddr) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Free Buffer Ring %d\n", + id); + return -ENOMEM; + } + } + + for (id = 0; id < NUM_FBRS; id++) { + fbr = rx_ring->fbr[id]; + fbr_chunksize = (FBR_CHUNKS * fbr->buffsize); + + for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) { + dma_addr_t fbr_physaddr; + + fbr->mem_virtaddrs[i] = dma_alloc_coherent( + &adapter->pdev->dev, fbr_chunksize, + &fbr->mem_physaddrs[i], + GFP_KERNEL); + + if (!fbr->mem_virtaddrs[i]) { + dev_err(&adapter->pdev->dev, + "Could not alloc memory\n"); + return -ENOMEM; + } + + /* See NOTE in "Save Physical Address" comment above */ + fbr_physaddr = fbr->mem_physaddrs[i]; + + for (j = 0; j < FBR_CHUNKS; j++) { + u32 k = (i * FBR_CHUNKS) + j; + + /* Save the Virtual address of this index for + * quick access later + */ + fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] + + (j * fbr->buffsize); + + /* now store the physical address in the + * descriptor so the device can access it + */ + fbr->bus_high[k] = upper_32_bits(fbr_physaddr); + fbr->bus_low[k] = lower_32_bits(fbr_physaddr); + fbr_physaddr += fbr->buffsize; + } + } + } + + /* Allocate an area of memory for FIFO of Packet Status ring entries */ + psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries; + + rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, + psr_size, + &rx_ring->ps_ring_physaddr, + GFP_KERNEL); + + if (!rx_ring->ps_ring_virtaddr) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Packet Status Ring\n"); + return -ENOMEM; + } + + /* Allocate an area of memory for writeback of status information */ + rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct rx_status_block), + &rx_ring->rx_status_bus, + GFP_KERNEL); + if (!rx_ring->rx_status_block) { + dev_err(&adapter->pdev->dev, + 
"Cannot alloc memory for Status Block\n"); + return -ENOMEM; + } + rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; + + /* The RFDs are going to be put on lists later on, so initialize the + * lists now. + */ + INIT_LIST_HEAD(&rx_ring->recv_list); + return 0; +} + +static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) +{ + u8 id; + u32 ii; + u32 bufsize; + u32 psr_size; + struct rfd *rfd; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct fbr_lookup *fbr; + + /* Free RFDs and associated packet descriptors */ + WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); + + while (!list_empty(&rx_ring->recv_list)) { + rfd = list_entry(rx_ring->recv_list.next, + struct rfd, list_node); + + list_del(&rfd->list_node); + rfd->skb = NULL; + kfree(rfd); + } + + /* Free Free Buffer Rings */ + for (id = 0; id < NUM_FBRS; id++) { + fbr = rx_ring->fbr[id]; + + if (!fbr || !fbr->ring_virtaddr) + continue; + + /* First the packet memory */ + for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) { + if (fbr->mem_virtaddrs[ii]) { + bufsize = fbr->buffsize * FBR_CHUNKS; + + dma_free_coherent(&adapter->pdev->dev, + bufsize, + fbr->mem_virtaddrs[ii], + fbr->mem_physaddrs[ii]); + + fbr->mem_virtaddrs[ii] = NULL; + } + } + + bufsize = sizeof(struct fbr_desc) * fbr->num_entries; + + dma_free_coherent(&adapter->pdev->dev, + bufsize, + fbr->ring_virtaddr, + fbr->ring_physaddr); + + fbr->ring_virtaddr = NULL; + } + + /* Free Packet Status Ring */ + if (rx_ring->ps_ring_virtaddr) { + psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries; + + dma_free_coherent(&adapter->pdev->dev, psr_size, + rx_ring->ps_ring_virtaddr, + rx_ring->ps_ring_physaddr); + + rx_ring->ps_ring_virtaddr = NULL; + } + + /* Free area of memory for the writeback of status information */ + if (rx_ring->rx_status_block) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct rx_status_block), + rx_ring->rx_status_block, + rx_ring->rx_status_bus); + rx_ring->rx_status_block = NULL; + } + + /* Free the FBR Lookup Table */ + kfree(rx_ring->fbr[0]); + kfree(rx_ring->fbr[1]); + + /* Reset Counters */ + rx_ring->num_ready_recv = 0; +} + +/* et131x_init_recv - Initialize receive data structures */ +static int et131x_init_recv(struct et131x_adapter *adapter) +{ + struct rfd *rfd; + u32 rfdct; + struct rx_ring *rx_ring = &adapter->rx_ring; + + /* Setup each RFD */ + for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { + rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA); + if (!rfd) + return -ENOMEM; + + rfd->skb = NULL; + + /* Add this RFD to the recv_list */ + list_add_tail(&rfd->list_node, &rx_ring->recv_list); + + /* Increment the available RFD's */ + rx_ring->num_ready_recv++; + } + + return 0; +} + +/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */ +static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) +{ + struct phy_device *phydev = adapter->phydev; + + /* For version B silicon, we do not use the RxDMA timer for 10 and 100 + * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. 
+ */ + if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { + writel(0, &adapter->regs->rxdma.max_pkt_time); + writel(1, &adapter->regs->rxdma.num_pkt_done); + } +} + +/* nic_return_rfd - Recycle a RFD and put it back onto the receive list */ +static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) +{ + struct rx_ring *rx_local = &adapter->rx_ring; + struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; + u16 buff_index = rfd->bufferindex; + u8 ring_index = rfd->ringindex; + unsigned long flags; + struct fbr_lookup *fbr = rx_local->fbr[ring_index]; + + /* We don't use any of the OOB data besides status. Otherwise, we + * need to clean up OOB data + */ + if (buff_index < fbr->num_entries) { + u32 free_buff_ring; + u32 __iomem *offset; + struct fbr_desc *next; + + if (ring_index == 0) + offset = &rx_dma->fbr0_full_offset; + else + offset = &rx_dma->fbr1_full_offset; + + next = (struct fbr_desc *)(fbr->ring_virtaddr) + + INDEX10(fbr->local_full); + + /* Handle the Free Buffer Ring advancement here. Write + * the PA / Buffer Index for the returned buffer into + * the oldest (next to be freed)FBR entry + */ + next->addr_hi = fbr->bus_high[buff_index]; + next->addr_lo = fbr->bus_low[buff_index]; + next->word2 = buff_index; + + free_buff_ring = bump_free_buff_ring(&fbr->local_full, + fbr->num_entries - 1); + writel(free_buff_ring, offset); + } else { + dev_err(&adapter->pdev->dev, + "%s illegal Buffer Index returned\n", __func__); + } + + /* The processing on this RFD is done, so put it back on the tail of + * our list + */ + spin_lock_irqsave(&adapter->rcv_lock, flags); + list_add_tail(&rfd->list_node, &rx_local->recv_list); + rx_local->num_ready_recv++; + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + + WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); +} + +/* nic_rx_pkts - Checks the hardware for available packets + * + * Checks the hardware for available packets, using completion ring + * If packets are available, it gets an RFD from the recv_list, attaches + * the packet to it, puts the RFD in the RecvPendList, and also returns + * the pointer to the RFD. + */ +static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) +{ + struct rx_ring *rx_local = &adapter->rx_ring; + struct rx_status_block *status; + struct pkt_stat_desc *psr; + struct rfd *rfd; + unsigned long flags; + struct list_head *element; + u8 ring_index; + u16 buff_index; + u32 len; + u32 word0; + u32 word1; + struct sk_buff *skb; + struct fbr_lookup *fbr; + + /* RX Status block is written by the DMA engine prior to every + * interrupt. It contains the next to be used entry in the Packet + * Status Ring, and also the two Free Buffer rings. + */ + status = rx_local->rx_status_block; + word1 = status->word1 >> 16; + + /* Check the PSR and wrap bits do not match */ + if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) + return NULL; /* Looks like this ring is not updated yet */ + + /* The packet status ring indicates that data is available. */ + psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) + + (rx_local->local_psr_full & 0xFFF); + + /* Grab any information that is required once the PSR is advanced, + * since we can no longer rely on the memory being accurate + */ + len = psr->word1 & 0xFFFF; + ring_index = (psr->word1 >> 26) & 0x03; + fbr = rx_local->fbr[ring_index]; + buff_index = (psr->word1 >> 16) & 0x3FF; + word0 = psr->word0; + + /* Indicate that we have used this PSR entry. 
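+ * (Illustrative worked example of the decode just above, not additional
+ * hardware documentation: word1 packs the frame length in bits 0-15, the
+ * free-buffer index in bits 16-25 and the ring index in bits 26-27, so e.g.
+ *   word1      = 0x040505EA
+ *   len        = word1 & 0xFFFF        = 0x05EA (1514 bytes)
+ *   buff_index = (word1 >> 16) & 0x3FF = 5
+ *   ring_index = (word1 >> 26) & 0x03  = 1  (FBR1)
+ * local_psr_full below likewise keeps the PSR position in bits 0-11 with a
+ * wrap flag in bit 12.)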
*/ + /* FIXME wrap 12 */ + add_12bit(&rx_local->local_psr_full, 1); + if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) { + /* Clear psr full and toggle the wrap bit */ + rx_local->local_psr_full &= ~0xFFF; + rx_local->local_psr_full ^= 0x1000; + } + + writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); + + if (ring_index > 1 || buff_index > fbr->num_entries - 1) { + /* Illegal buffer or ring index cannot be used by S/W*/ + dev_err(&adapter->pdev->dev, + "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n", + rx_local->local_psr_full & 0xFFF, len, buff_index); + return NULL; + } + + /* Get and fill the RFD. */ + spin_lock_irqsave(&adapter->rcv_lock, flags); + + element = rx_local->recv_list.next; + rfd = list_entry(element, struct rfd, list_node); + + if (!rfd) { + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + return NULL; + } + + list_del(&rfd->list_node); + rx_local->num_ready_recv--; + + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + + rfd->bufferindex = buff_index; + rfd->ringindex = ring_index; + + /* In V1 silicon, there is a bug which screws up filtering of runt + * packets. Therefore runt packet filtering is disabled in the MAC and + * the packets are dropped here. They are also counted here. + */ + if (len < (NIC_MIN_PACKET_SIZE + 4)) { + adapter->stats.rx_other_errs++; + rfd->len = 0; + goto out; + } + + if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT)) + adapter->stats.multicast_pkts_rcvd++; + + rfd->len = len; + + skb = dev_alloc_skb(rfd->len + 2); + if (!skb) + return NULL; + + adapter->netdev->stats.rx_bytes += rfd->len; + + memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len); + + skb->protocol = eth_type_trans(skb, adapter->netdev); + skb->ip_summed = CHECKSUM_NONE; + netif_receive_skb(skb); + +out: + nic_return_rfd(adapter, rfd); + return rfd; +} + +static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget) +{ + struct rfd *rfd = NULL; + int count = 0; + int limit = budget; + bool done = true; + struct rx_ring *rx_ring = &adapter->rx_ring; + + if (budget > MAX_PACKETS_HANDLED) + limit = MAX_PACKETS_HANDLED; + + /* Process up to available RFD's */ + while (count < limit) { + if (list_empty(&rx_ring->recv_list)) { + WARN_ON(rx_ring->num_ready_recv != 0); + done = false; + break; + } + + rfd = nic_rx_pkts(adapter); + + if (rfd == NULL) + break; + + /* Do not receive any packets until a filter has been set. + * Do not receive any packets until we have link. + * If length is zero, return the RFD in order to advance the + * Free buffer ring. + */ + if (!adapter->packet_filter || + !netif_carrier_ok(adapter->netdev) || + rfd->len == 0) + continue; + + adapter->netdev->stats.rx_packets++; + + if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK) + dev_warn(&adapter->pdev->dev, "RFD's are running out\n"); + + count++; + } + + if (count == limit || !done) { + rx_ring->unfinished_receives = true; + writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, + &adapter->regs->global.watchdog_timer); + } else { + /* Watchdog timer will disable itself if appropriate. */ + rx_ring->unfinished_receives = false; + } + + return count; +} + +/* et131x_tx_dma_memory_alloc + * + * Allocates memory that will be visible both to the device and to the CPU. + * The OS will pass us packets, pointers to which we will insert in the Tx + * Descriptor queue. The device will read this queue to find the packets in + * memory. The device will update the "status" in memory each time it xmits a + * packet. 
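+ * (Illustrative note, inferred from the INDEX10()/add_10bit()/ET_DMA10_*
+ * handling elsewhere in this file: indices into this ring, such as
+ * send_idx, keep the descriptor position in their ET_DMA10_MASK bits and a
+ * wrap flag in ET_DMA10_WRAP; advancing past the last descriptor clears
+ * the position bits and toggles the wrap flag, and et131x_handle_send_pkts()
+ * compares that flag to tell descriptors of the current pass around the
+ * ring from those of the previous one.)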
+ */ +static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) +{ + int desc_size = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Allocate memory for the TCB's (Transmit Control Block) */ + tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), + GFP_ATOMIC | GFP_DMA); + if (!tx_ring->tcb_ring) + return -ENOMEM; + + desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); + tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev, + desc_size, + &tx_ring->tx_desc_ring_pa, + GFP_KERNEL); + if (!tx_ring->tx_desc_ring) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Tx Ring\n"); + return -ENOMEM; + } + + tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(u32), + &tx_ring->tx_status_pa, + GFP_KERNEL); + if (!tx_ring->tx_status_pa) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Tx status block\n"); + return -ENOMEM; + } + return 0; +} + +static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) +{ + int desc_size = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + if (tx_ring->tx_desc_ring) { + /* Free memory relating to Tx rings here */ + desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); + dma_free_coherent(&adapter->pdev->dev, + desc_size, + tx_ring->tx_desc_ring, + tx_ring->tx_desc_ring_pa); + tx_ring->tx_desc_ring = NULL; + } + + /* Free memory for the Tx status block */ + if (tx_ring->tx_status) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(u32), + tx_ring->tx_status, + tx_ring->tx_status_pa); + + tx_ring->tx_status = NULL; + } + /* Free the memory for the tcb structures */ + kfree(tx_ring->tcb_ring); +} + +/* nic_send_packet - NIC specific send handler for version B silicon. */ +static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) +{ + u32 i; + struct tx_desc desc[24]; + u32 frag = 0; + u32 thiscopy, remainder; + struct sk_buff *skb = tcb->skb; + u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; + struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; + struct phy_device *phydev = adapter->phydev; + dma_addr_t dma_addr; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Part of the optimizations of this send routine restrict us to + * sending 24 fragments at a pass. In practice we should never see + * more than 5 fragments. + */ + + /* nr_frags should be no more than 18. */ + BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23); + + memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); + + for (i = 0; i < nr_frags; i++) { + /* If there is something in this element, lets get a + * descriptor from the ring and get the necessary data + */ + if (i == 0) { + /* If the fragments are smaller than a standard MTU, + * then map them to a single descriptor in the Tx + * Desc ring. However, if they're larger, as is + * possible with support for jumbo packets, then + * split them each across 2 descriptors. + * + * This will work until we determine why the hardware + * doesn't seem to like large fragments. 
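+ * (Worked example of the split below, using the code's own 1514-byte
+ * threshold: a 1200-byte linear area maps to a single descriptor, while a
+ * 3000-byte linear area from a jumbo frame is mapped as two descriptors of
+ * 1500 bytes each, both pointing into the same skb->data buffer at
+ * offsets 0 and 1500.)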
+ */ + if (skb_headlen(skb) <= 1514) { + /* Low 16bits are length, high is vlan and + * unused currently so zero + */ + desc[frag].len_vlan = skb_headlen(skb); + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data, + skb_headlen(skb), + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } else { + desc[frag].len_vlan = skb_headlen(skb) / 2; + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data, + skb_headlen(skb) / 2, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + + desc[frag].len_vlan = skb_headlen(skb) / 2; + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data + + skb_headlen(skb) / 2, + skb_headlen(skb) / 2, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } + } else { + desc[frag].len_vlan = frags[i - 1].size; + dma_addr = skb_frag_dma_map(&adapter->pdev->dev, + &frags[i - 1], + 0, + frags[i - 1].size, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } + } + + if (phydev && phydev->speed == SPEED_1000) { + if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) { + /* Last element & Interrupt flag */ + desc[frag - 1].flags = + TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; + tx_ring->since_irq = 0; + } else { /* Last element */ + desc[frag - 1].flags = TXDESC_FLAG_LASTPKT; + } + } else { + desc[frag - 1].flags = + TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; + } + + desc[0].flags |= TXDESC_FLAG_FIRSTPKT; + + tcb->index_start = tx_ring->send_idx; + tcb->stale = 0; + + thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx); + + if (thiscopy >= frag) { + remainder = 0; + thiscopy = frag; + } else { + remainder = frag - thiscopy; + } + + memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx), + desc, + sizeof(struct tx_desc) * thiscopy); + + add_10bit(&tx_ring->send_idx, thiscopy); + + if (INDEX10(tx_ring->send_idx) == 0 || + INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) { + tx_ring->send_idx &= ~ET_DMA10_MASK; + tx_ring->send_idx ^= ET_DMA10_WRAP; + } + + if (remainder) { + memcpy(tx_ring->tx_desc_ring, + desc + thiscopy, + sizeof(struct tx_desc) * remainder); + + add_10bit(&tx_ring->send_idx, remainder); + } + + if (INDEX10(tx_ring->send_idx) == 0) { + if (tx_ring->send_idx) + tcb->index = NUM_DESC_PER_RING_TX - 1; + else + tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); + } else { + tcb->index = tx_ring->send_idx - 1; + } + + spin_lock(&adapter->tcb_send_qlock); + + if (tx_ring->send_tail) + tx_ring->send_tail->next = tcb; + else + tx_ring->send_head = tcb; + + tx_ring->send_tail = tcb; + + WARN_ON(tcb->next != NULL); + + tx_ring->used++; + + spin_unlock(&adapter->tcb_send_qlock); + + /* Write the new write pointer back to the device. */ + writel(tx_ring->send_idx, &adapter->regs->txdma.service_request); + + /* For Gig only, we use Tx Interrupt coalescing. Enable the software + * timer to wake us up if this packet isn't followed by N more. 
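+ * ("N" here corresponds to PARM_TX_NUM_BUFS_DEF, the since_irq threshold
+ * checked a few lines above, and the timeout written just below is
+ * PARM_TX_TIME_INT_DEF scaled by NANO_IN_A_MICRO, which suggests a
+ * microsecond parameter fed to a nanosecond-granularity watchdog; the
+ * units are an inference from the macro names only.)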
+ */ + if (phydev && phydev->speed == SPEED_1000) { + writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, + &adapter->regs->global.watchdog_timer); + } + return 0; +} + +static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) +{ + int status; + struct tcb *tcb; + unsigned long flags; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* All packets must have at least a MAC address and a protocol type */ + if (skb->len < ETH_HLEN) + return -EIO; + + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + tcb = tx_ring->tcb_qhead; + + if (tcb == NULL) { + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + return -ENOMEM; + } + + tx_ring->tcb_qhead = tcb->next; + + if (tx_ring->tcb_qhead == NULL) + tx_ring->tcb_qtail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + + tcb->skb = skb; + tcb->next = NULL; + + status = nic_send_packet(adapter, tcb); + + if (status != 0) { + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + if (tx_ring->tcb_qtail) + tx_ring->tcb_qtail->next = tcb; + else + /* Apparently ready Q is empty. */ + tx_ring->tcb_qhead = tcb; + + tx_ring->tcb_qtail = tcb; + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + return status; + } + WARN_ON(tx_ring->used > NUM_TCB); + return 0; +} + +/* free_send_packet - Recycle a struct tcb */ +static inline void free_send_packet(struct et131x_adapter *adapter, + struct tcb *tcb) +{ + unsigned long flags; + struct tx_desc *desc = NULL; + struct net_device_stats *stats = &adapter->netdev->stats; + struct tx_ring *tx_ring = &adapter->tx_ring; + u64 dma_addr; + + if (tcb->skb) { + stats->tx_bytes += tcb->skb->len; + + /* Iterate through the TX descriptors on the ring + * corresponding to this packet and umap the fragments + * they point to + */ + do { + desc = tx_ring->tx_desc_ring + + INDEX10(tcb->index_start); + + dma_addr = desc->addr_lo; + dma_addr |= (u64)desc->addr_hi << 32; + + dma_unmap_single(&adapter->pdev->dev, + dma_addr, + desc->len_vlan, DMA_TO_DEVICE); + + add_10bit(&tcb->index_start, 1); + if (INDEX10(tcb->index_start) >= + NUM_DESC_PER_RING_TX) { + tcb->index_start &= ~ET_DMA10_MASK; + tcb->index_start ^= ET_DMA10_WRAP; + } + } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index)); + + dev_kfree_skb_any(tcb->skb); + } + + memset(tcb, 0, sizeof(struct tcb)); + + /* Add the TCB to the Ready Q */ + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + stats->tx_packets++; + + if (tx_ring->tcb_qtail) + tx_ring->tcb_qtail->next = tcb; + else /* Apparently ready Q is empty. */ + tx_ring->tcb_qhead = tcb; + + tx_ring->tcb_qtail = tcb; + + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + WARN_ON(tx_ring->used < 0); +} + +/* et131x_free_busy_send_packets - Free and complete the stopped active sends */ +static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) +{ + struct tcb *tcb; + unsigned long flags; + u32 freed = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Any packets being sent? 
Check the first TCB on the send list */ + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + + while (tcb != NULL && freed < NUM_TCB) { + struct tcb *next = tcb->next; + + tx_ring->send_head = next; + + if (next == NULL) + tx_ring->send_tail = NULL; + + tx_ring->used--; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + + freed++; + free_send_packet(adapter, tcb); + + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + } + + WARN_ON(freed == NUM_TCB); + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + + tx_ring->used = 0; +} + +/* et131x_handle_send_pkts + * + * Re-claim the send resources, complete sends and get more to send from + * the send wait queue. + */ +static void et131x_handle_send_pkts(struct et131x_adapter *adapter) +{ + unsigned long flags; + u32 serviced; + struct tcb *tcb; + u32 index; + struct tx_ring *tx_ring = &adapter->tx_ring; + + serviced = readl(&adapter->regs->txdma.new_service_complete); + index = INDEX10(serviced); + + /* Has the ring wrapped? Process any descriptors that do not have + * the same "wrap" indicator as the current completion indicator + */ + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + + while (tcb && + ((serviced ^ tcb->index) & ET_DMA10_WRAP) && + index < INDEX10(tcb->index)) { + tx_ring->used--; + tx_ring->send_head = tcb->next; + if (tcb->next == NULL) + tx_ring->send_tail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + free_send_packet(adapter, tcb); + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + /* Goto the next packet */ + tcb = tx_ring->send_head; + } + while (tcb && + !((serviced ^ tcb->index) & ET_DMA10_WRAP) && + index > (tcb->index & ET_DMA10_MASK)) { + tx_ring->used--; + tx_ring->send_head = tcb->next; + if (tcb->next == NULL) + tx_ring->send_tail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + free_send_packet(adapter, tcb); + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + /* Goto the next packet */ + tcb = tx_ring->send_head; + } + + /* Wake up the queue when we hit a low-water mark */ + if (tx_ring->used <= NUM_TCB / 3) + netif_wake_queue(adapter->netdev); + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); +} + +static int et131x_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + return phy_ethtool_gset(adapter->phydev, cmd); +} + +static int et131x_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + return phy_ethtool_sset(adapter->phydev, cmd); +} + +static int et131x_get_regs_len(struct net_device *netdev) +{ +#define ET131X_REGS_LEN 256 + return ET131X_REGS_LEN * sizeof(u32); +} + +static void et131x_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *regs_data) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct address_map __iomem *aregs = adapter->regs; + u32 *regs_buff = regs_data; + u32 num = 0; + u16 tmp; + + memset(regs_data, 0, et131x_get_regs_len(netdev)); + + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; + + /* PHY regs */ + et131x_mii_read(adapter, MII_BMCR, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_BMSR, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_PHYSID1, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_PHYSID2, &tmp); + 
regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_ADVERTISE, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_LPA, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_EXPANSION, &tmp); + regs_buff[num++] = tmp; + /* Autoneg next page transmit reg */ + et131x_mii_read(adapter, 0x07, &tmp); + regs_buff[num++] = tmp; + /* Link partner next page reg */ + et131x_mii_read(adapter, 0x08, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_CTRL1000, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_STAT1000, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, 0x0b, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, 0x0c, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_MMD_CTRL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_MMD_DATA, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_ESTATUS, &tmp); + regs_buff[num++] = tmp; + + et131x_mii_read(adapter, PHY_INDEX_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_DATA_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp); + regs_buff[num++] = tmp; + + et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_CONFIG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LED_1, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LED_2, &tmp); + regs_buff[num++] = tmp; + + /* Global regs */ + regs_buff[num++] = readl(&aregs->global.txq_start_addr); + regs_buff[num++] = readl(&aregs->global.txq_end_addr); + regs_buff[num++] = readl(&aregs->global.rxq_start_addr); + regs_buff[num++] = readl(&aregs->global.rxq_end_addr); + regs_buff[num++] = readl(&aregs->global.pm_csr); + regs_buff[num++] = adapter->stats.interrupt_status; + regs_buff[num++] = readl(&aregs->global.int_mask); + regs_buff[num++] = readl(&aregs->global.int_alias_clr_en); + regs_buff[num++] = readl(&aregs->global.int_status_alias); + regs_buff[num++] = readl(&aregs->global.sw_reset); + regs_buff[num++] = readl(&aregs->global.slv_timer); + regs_buff[num++] = readl(&aregs->global.msi_config); + regs_buff[num++] = readl(&aregs->global.loopback); + regs_buff[num++] = readl(&aregs->global.watchdog_timer); + + /* TXDMA regs */ + regs_buff[num++] = readl(&aregs->txdma.csr); + regs_buff[num++] = readl(&aregs->txdma.pr_base_hi); + regs_buff[num++] = readl(&aregs->txdma.pr_base_lo); + regs_buff[num++] = readl(&aregs->txdma.pr_num_des); + regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr); + regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext); + regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr); + regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi); + regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo); + regs_buff[num++] = readl(&aregs->txdma.service_request); + regs_buff[num++] = readl(&aregs->txdma.service_complete); + regs_buff[num++] = readl(&aregs->txdma.cache_rd_index); + regs_buff[num++] = readl(&aregs->txdma.cache_wr_index); + regs_buff[num++] = 
readl(&aregs->txdma.tx_dma_error); + regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt); + regs_buff[num++] = readl(&aregs->txdma.new_service_complete); + regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt); + + /* RXDMA regs */ + regs_buff[num++] = readl(&aregs->rxdma.csr); + regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done); + regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time); + regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr); + regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext); + regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr); + regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.psr_num_des); + regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); + regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); +} + +static void et131x_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); +} + +static struct ethtool_ops et131x_ethtool_ops = { + .get_settings = et131x_get_settings, + .set_settings = et131x_set_settings, + .get_drvinfo = et131x_get_drvinfo, + .get_regs_len = et131x_get_regs_len, + .get_regs = et131x_get_regs, + .get_link = ethtool_op_get_link, +}; + +/* et131x_hwaddr_init - set up the MAC Address */ +static void et131x_hwaddr_init(struct et131x_adapter *adapter) +{ + /* If have our default mac from init and no mac address from + * EEPROM then we need to generate the last octet and set it on the + * device + */ + if (is_zero_ether_addr(adapter->rom_addr)) { + /* We need to randomly generate the last octet so we + * decrease our chances of setting the mac address to + * same as another one of our cards in the 
system + */ + get_random_bytes(&adapter->addr[5], 1); + /* We have the default value in the register we are + * working with so we need to copy the current + * address into the permanent address + */ + ether_addr_copy(adapter->rom_addr, adapter->addr); + } else { + /* We do not have an override address, so set the + * current address to the permanent address and add + * it to the device + */ + ether_addr_copy(adapter->addr, adapter->rom_addr); + } +} + +static int et131x_pci_init(struct et131x_adapter *adapter, + struct pci_dev *pdev) +{ + u16 max_payload; + int i, rc; + + rc = et131x_init_eeprom(adapter); + if (rc < 0) + goto out; + + if (!pci_is_pcie(pdev)) { + dev_err(&pdev->dev, "Missing PCIe capabilities\n"); + goto err_out; + } + + /* Program the Ack/Nak latency and replay timers */ + max_payload = pdev->pcie_mpss; + + if (max_payload < 2) { + static const u16 acknak[2] = { 0x76, 0xD0 }; + static const u16 replay[2] = { 0x1E0, 0x2ED }; + + if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, + acknak[max_payload])) { + dev_err(&pdev->dev, + "Could not write PCI config space for ACK/NAK\n"); + goto err_out; + } + if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, + replay[max_payload])) { + dev_err(&pdev->dev, + "Could not write PCI config space for Replay Timer\n"); + goto err_out; + } + } + + /* l0s and l1 latency timers. We are using default values. + * Representing 001 for L0s and 010 for L1 + */ + if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { + dev_err(&pdev->dev, + "Could not write PCI config space for Latency Timers\n"); + goto err_out; + } + + /* Change the max read size to 2k */ + if (pcie_set_readrq(pdev, 2048)) { + dev_err(&pdev->dev, + "Couldn't change PCI config space for Max read size\n"); + goto err_out; + } + + /* Get MAC address from config space if an eeprom exists, otherwise + * the MAC address there will not be valid + */ + if (!adapter->has_eeprom) { + et131x_hwaddr_init(adapter); + return 0; + } + + for (i = 0; i < ETH_ALEN; i++) { + if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, + adapter->rom_addr + i)) { + dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); + goto err_out; + } + } + ether_addr_copy(adapter->addr, adapter->rom_addr); +out: + return rc; +err_out: + rc = -EIO; + goto out; +} + +/* et131x_error_timer_handler + * @data: timer-specific variable; here a pointer to our adapter structure + * + * The routine called when the error timer expires, to track the number of + * recurring errors. + */ +static void et131x_error_timer_handler(unsigned long data) +{ + struct et131x_adapter *adapter = (struct et131x_adapter *)data; + struct phy_device *phydev = adapter->phydev; + + if (et1310_in_phy_coma(adapter)) { + /* Bring the device immediately out of coma, to + * prevent it from sleeping indefinitely, this + * mechanism could be improved! + */ + et1310_disable_phy_coma(adapter); + adapter->boot_coma = 20; + } else { + et1310_update_macstat_host_counters(adapter); + } + + if (!phydev->link && adapter->boot_coma < 11) + adapter->boot_coma++; + + if (adapter->boot_coma == 10) { + if (!phydev->link) { + if (!et1310_in_phy_coma(adapter)) { + /* NOTE - This was originally a 'sync with + * interrupt'. How to do that under Linux? 
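+ * (Descriptive note inferred from this function and et131x_adjust_link():
+ * boot_coma counts error-timer ticks of TX_ERROR_PERIOD ms each (see the
+ * mod_timer() call below) spent without link; once it reaches 10 the PHY
+ * is put into coma here, and it is reset to 20 when coma is exited or the
+ * link comes back up.)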
+ */ + et131x_enable_interrupts(adapter); + et1310_enable_phy_coma(adapter); + } + } + } + + /* This is a periodic timer, so reschedule */ + mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000); +} + +static void et131x_adapter_memory_free(struct et131x_adapter *adapter) +{ + et131x_tx_dma_memory_free(adapter); + et131x_rx_dma_memory_free(adapter); +} + +static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) +{ + int status; + + status = et131x_tx_dma_memory_alloc(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "et131x_tx_dma_memory_alloc FAILED\n"); + et131x_tx_dma_memory_free(adapter); + return status; + } + + status = et131x_rx_dma_memory_alloc(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "et131x_rx_dma_memory_alloc FAILED\n"); + et131x_adapter_memory_free(adapter); + return status; + } + + status = et131x_init_recv(adapter); + if (status) { + dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n"); + et131x_adapter_memory_free(adapter); + } + return status; +} + +static void et131x_adjust_link(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = adapter->phydev; + + if (!phydev) + return; + if (phydev->link == adapter->link) + return; + + /* Check to see if we are in coma mode and if + * so, disable it because we will not be able + * to read PHY values until we are out. + */ + if (et1310_in_phy_coma(adapter)) + et1310_disable_phy_coma(adapter); + + adapter->link = phydev->link; + phy_print_status(phydev); + + if (phydev->link) { + adapter->boot_coma = 20; + if (phydev->speed == SPEED_10) { + u16 register18; + + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, + ®ister18); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, + register18 | 0x4); + et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, + register18 | 0x8402); + et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, + register18 | 511); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18); + } + + et1310_config_flow_control(adapter); + + if (phydev->speed == SPEED_1000 && + adapter->registry_jumbo_packet > 2048) { + u16 reg; + + et131x_mii_read(adapter, PHY_CONFIG, ®); + reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; + reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; + et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, + reg); + } + + et131x_set_rx_dma_timer(adapter); + et1310_config_mac_regs2(adapter); + } else { + adapter->boot_coma = 0; + + if (phydev->speed == SPEED_10) { + u16 register18; + + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, + ®ister18); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, + register18 | 0x4); + et131x_mii_write(adapter, phydev->addr, + PHY_INDEX_REG, register18 | 0x8402); + et131x_mii_write(adapter, phydev->addr, + PHY_DATA_REG, register18 | 511); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18); + } + + et131x_free_busy_send_packets(adapter); + et131x_init_send(adapter); + + /* Bring the device back to the state it was during + * init prior to autonegotiation being complete. This + * way, when we get the auto-neg complete interrupt, + * we can complete init by calling config_mac_regs2. 
+ */ + et131x_soft_reset(adapter); + + et131x_adapter_setup(adapter); + + et131x_disable_txrx(netdev); + et131x_enable_txrx(netdev); + } +} + +static int et131x_mii_probe(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = NULL; + + phydev = phy_find_first(adapter->mii_bus); + if (!phydev) { + dev_err(&adapter->pdev->dev, "no PHY found\n"); + return -ENODEV; + } + + phydev = phy_connect(netdev, dev_name(&phydev->dev), + &et131x_adjust_link, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + phydev->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII | + SUPPORTED_TP); + + if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) + phydev->supported |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + + phydev->advertising = phydev->supported; + phydev->autoneg = AUTONEG_ENABLE; + adapter->phydev = phydev; + + dev_info(&adapter->pdev->dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + + return 0; +} + +static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, + struct pci_dev *pdev) +{ + static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; + + struct et131x_adapter *adapter; + + adapter = netdev_priv(netdev); + adapter->pdev = pci_dev_get(pdev); + adapter->netdev = netdev; + + spin_lock_init(&adapter->tcb_send_qlock); + spin_lock_init(&adapter->tcb_ready_qlock); + spin_lock_init(&adapter->rcv_lock); + + adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ + + ether_addr_copy(adapter->addr, default_mac); + + return adapter; +} + +static void et131x_pci_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct et131x_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + netif_napi_del(&adapter->napi); + phy_disconnect(adapter->phydev); + mdiobus_unregister(adapter->mii_bus); + kfree(adapter->mii_bus->irq); + mdiobus_free(adapter->mii_bus); + + et131x_adapter_memory_free(adapter); + iounmap(adapter->regs); + pci_dev_put(pdev); + + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static void et131x_up(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_enable_txrx(netdev); + phy_start(adapter->phydev); +} + +static void et131x_down(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + /* Save the timestamp for the TX watchdog, prevent a timeout */ + netdev->trans_start = jiffies; + + phy_stop(adapter->phydev); + et131x_disable_txrx(netdev); +} + +#ifdef CONFIG_PM_SLEEP +static int et131x_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netif_running(netdev)) { + netif_device_detach(netdev); + et131x_down(netdev); + pci_save_state(pdev); + } + + return 0; +} + +static int et131x_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netif_running(netdev)) { + pci_restore_state(pdev); + et131x_up(netdev); + netif_device_attach(netdev); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); + +static irqreturn_t et131x_isr(int irq, void *dev_id) +{ + bool 
handled = true; + bool enable_interrupts = true; + struct net_device *netdev = dev_id; + struct et131x_adapter *adapter = netdev_priv(netdev); + struct address_map __iomem *iomem = adapter->regs; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct tx_ring *tx_ring = &adapter->tx_ring; + u32 status; + + if (!netif_device_present(netdev)) { + handled = false; + enable_interrupts = false; + goto out; + } + + et131x_disable_interrupts(adapter); + + status = readl(&adapter->regs->global.int_status); + + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) + status &= ~INT_MASK_ENABLE; + else + status &= ~INT_MASK_ENABLE_NO_FLOW; + + /* Make sure this is our interrupt */ + if (!status) { + handled = false; + et131x_enable_interrupts(adapter); + goto out; + } + + /* This is our interrupt, so process accordingly */ + if (status & ET_INTR_WATCHDOG) { + struct tcb *tcb = tx_ring->send_head; + + if (tcb) + if (++tcb->stale > 1) + status |= ET_INTR_TXDMA_ISR; + + if (rx_ring->unfinished_receives) + status |= ET_INTR_RXDMA_XFR_DONE; + else if (tcb == NULL) + writel(0, &adapter->regs->global.watchdog_timer); + + status &= ~ET_INTR_WATCHDOG; + } + + if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) { + enable_interrupts = false; + napi_schedule(&adapter->napi); + } + + status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE); + + if (!status) + goto out; + + if (status & ET_INTR_TXDMA_ERR) { + /* Following read also clears the register (COR) */ + u32 txdma_err = readl(&iomem->txdma.tx_dma_error); + + dev_warn(&adapter->pdev->dev, + "TXDMA_ERR interrupt, error = %d\n", + txdma_err); + } + + if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { + /* This indicates the number of unused buffers in RXDMA free + * buffer ring 0 is <= the limit you programmed. Free buffer + * resources need to be returned. Free buffers are consumed as + * packets are passed from the network to the host. The host + * becomes aware of the packets from the contents of the packet + * status ring. This ring is queried when the packet done + * interrupt occurs. Packets are then passed to the OS. When + * the OS is done with the packets the resources can be + * returned to the ET1310 for re-use. This interrupt is one + * method of returning resources. + */ + + /* If the user has flow control on, then we will + * send a pause packet, otherwise just exit + */ + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) { + u32 pm_csr; + + /* Tell the device to send a pause packet via the back + * pressure register (bp req and bp xon/xoff) + */ + pm_csr = readl(&iomem->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) + writel(3, &iomem->txmac.bp_ctrl); + } + } + + /* Handle Packet Status Ring Low Interrupt */ + if (status & ET_INTR_RXDMA_STAT_LOW) { + /* Same idea as with the two Free Buffer Rings. Packets going + * from the network to the host each consume a free buffer + * resource and a packet status resource. These resources are + * passed to the OS. When the OS is done with the resources, + * they need to be returned to the ET1310. This is one method + * of returning the resources. + */ + } + + if (status & ET_INTR_RXDMA_ERR) { + /* The rxdma_error interrupt is sent when a time-out on a + * request issued by the JAGCore has occurred or a completion is + * returned with an un-successful status. In both cases the + * request is considered complete. The JAGCore will + * automatically re-try the request in question. 
Normally + * information on events like these are sent to the host using + * the "Advanced Error Reporting" capability. This interrupt is + * another way of getting similar information. The only thing + * required is to clear the interrupt by reading the ISR in the + * global resources. The JAGCore will do a re-try on the + * request. Normally you should never see this interrupt. If + * you start to see this interrupt occurring frequently then + * something bad has occurred. A reset might be the thing to do. + */ + /* TRAP();*/ + + dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n", + readl(&iomem->txmac.tx_test)); + } + + /* Handle the Wake on LAN Event */ + if (status & ET_INTR_WOL) { + /* This is a secondary interrupt for wake on LAN. The driver + * should never see this, if it does, something serious is + * wrong. + */ + dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); + } + + if (status & ET_INTR_TXMAC) { + u32 err = readl(&iomem->txmac.err); + + /* When any of the errors occur and TXMAC generates an + * interrupt to report these errors, it usually means that + * TXMAC has detected an error in the data stream retrieved + * from the on-chip Tx Q. All of these errors are catastrophic + * and TXMAC won't be able to recover data when these errors + * occur. In a nutshell, the whole Tx path will have to be reset + * and re-configured afterwards. + */ + dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n", + err); + + /* If we are debugging, we want to see this error, otherwise we + * just want the device to be reset and continue + */ + } + + if (status & ET_INTR_RXMAC) { + /* These interrupts are catastrophic to the device, what we need + * to do is disable the interrupts and set the flag to cause us + * to reset so we can solve this issue. + */ + dev_warn(&adapter->pdev->dev, + "RXMAC interrupt, error 0x%08x. Requesting reset\n", + readl(&iomem->rxmac.err_reg)); + + dev_warn(&adapter->pdev->dev, + "Enable 0x%08x, Diag 0x%08x\n", + readl(&iomem->rxmac.ctrl), + readl(&iomem->rxmac.rxq_diag)); + + /* If we are debugging, we want to see this error, otherwise we + * just want the device to be reset and continue + */ + } + + if (status & ET_INTR_MAC_STAT) { + /* This means at least one of the un-masked counters in the + * MAC_STAT block has rolled over. Use this to maintain the top, + * software managed bits of the counter(s). + */ + et1310_handle_macstat_interrupt(adapter); + } + + if (status & ET_INTR_SLV_TIMEOUT) { + /* This means a timeout has occurred on a read or write request + * to one of the JAGCore registers. The Global Resources block + * has terminated the request and on a read request, returned a + * "fake" value. The most likely reasons are: Bad Address or the + * addressed module is in a power-down state and can't respond. 
+ */ + } + +out: + if (enable_interrupts) + et131x_enable_interrupts(adapter); + + return IRQ_RETVAL(handled); +} + +static int et131x_poll(struct napi_struct *napi, int budget) +{ + struct et131x_adapter *adapter = + container_of(napi, struct et131x_adapter, napi); + int work_done = et131x_handle_recv_pkts(adapter, budget); + + et131x_handle_send_pkts(adapter); + + if (work_done < budget) { + napi_complete(&adapter->napi); + et131x_enable_interrupts(adapter); + } + + return work_done; +} + +/* et131x_stats - Return the current device statistics */ +static struct net_device_stats *et131x_stats(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *stats = &adapter->netdev->stats; + struct ce_stats *devstat = &adapter->stats; + + stats->rx_errors = devstat->rx_length_errs + + devstat->rx_align_errs + + devstat->rx_crc_errs + + devstat->rx_code_violations + + devstat->rx_other_errs; + stats->tx_errors = devstat->tx_max_pkt_errs; + stats->multicast = devstat->multicast_pkts_rcvd; + stats->collisions = devstat->tx_collisions; + + stats->rx_length_errors = devstat->rx_length_errs; + stats->rx_over_errors = devstat->rx_overflows; + stats->rx_crc_errors = devstat->rx_crc_errs; + stats->rx_dropped = devstat->rcvd_pkts_dropped; + + /* NOTE: Not used, can't find analogous statistics */ + /* stats->rx_frame_errors = devstat->; */ + /* stats->rx_fifo_errors = devstat->; */ + /* stats->rx_missed_errors = devstat->; */ + + /* stats->tx_aborted_errors = devstat->; */ + /* stats->tx_carrier_errors = devstat->; */ + /* stats->tx_fifo_errors = devstat->; */ + /* stats->tx_heartbeat_errors = devstat->; */ + /* stats->tx_window_errors = devstat->; */ + return stats; +} + +static int et131x_open(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + unsigned int irq = pdev->irq; + int result; + + /* Start the timer to track NIC errors */ + init_timer(&adapter->error_timer); + adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; + adapter->error_timer.function = et131x_error_timer_handler; + adapter->error_timer.data = (unsigned long)adapter; + add_timer(&adapter->error_timer); + + result = request_irq(irq, et131x_isr, + IRQF_SHARED, netdev->name, netdev); + if (result) { + dev_err(&pdev->dev, "could not register IRQ %d\n", irq); + return result; + } + + adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE; + + napi_enable(&adapter->napi); + + et131x_up(netdev); + + return result; +} + +static int et131x_close(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_down(netdev); + napi_disable(&adapter->napi); + + adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE; + free_irq(adapter->pdev->irq, netdev); + + /* Stop the error timer */ + return del_timer_sync(&adapter->error_timer); +} + +static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, + int cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + if (!adapter->phydev) + return -EINVAL; + + return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); +} + +/* et131x_set_packet_filter - Configures the Rx Packet filtering */ +static int et131x_set_packet_filter(struct et131x_adapter *adapter) +{ + int filter = adapter->packet_filter; + u32 ctrl; + u32 pf_ctrl; + + ctrl = readl(&adapter->regs->rxmac.ctrl); + pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); + + /* Default to disabled packet filtering */ + ctrl |= 0x04; + + /* Set us to be in promiscuous mode so we 
receive everything, this + * is also true when we get a packet filter of 0 + */ + if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) + pf_ctrl &= ~7; /* Clear filter bits */ + else { + /* Set us up with Multicast packet filtering. Three cases are + * possible - (1) we have a multi-cast list, (2) we receive ALL + * multicast entries or (3) we receive none. + */ + if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) + pf_ctrl &= ~2; /* Multicast filter bit */ + else { + et1310_setup_device_for_multicast(adapter); + pf_ctrl |= 2; + ctrl &= ~0x04; + } + + /* Set us up with Unicast packet filtering */ + if (filter & ET131X_PACKET_TYPE_DIRECTED) { + et1310_setup_device_for_unicast(adapter); + pf_ctrl |= 4; + ctrl &= ~0x04; + } + + /* Set us up with Broadcast packet filtering */ + if (filter & ET131X_PACKET_TYPE_BROADCAST) { + pf_ctrl |= 1; /* Broadcast filter bit */ + ctrl &= ~0x04; + } else { + pf_ctrl &= ~1; + } + + /* Setup the receive mac configuration registers - Packet + * Filter control + the enable / disable for packet filter + * in the control reg. + */ + writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); + writel(ctrl, &adapter->regs->rxmac.ctrl); + } + return 0; +} + +static void et131x_multicast(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + int packet_filter; + struct netdev_hw_addr *ha; + int i; + + /* Before we modify the platform-independent filter flags, store them + * locally. This allows us to determine if anything's changed and if + * we even need to bother the hardware + */ + packet_filter = adapter->packet_filter; + + /* Clear the 'multicast' flag locally; because we only have a single + * flag to check multicast, and multiple multicast addresses can be + * set, this is the easiest way to determine if more than one + * multicast address is being set. + */ + packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; + + /* Check the net_device flags and set the device independent flags + * accordingly + */ + if (netdev->flags & IFF_PROMISC) + adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; + else + adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)) + adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; + + if (netdev_mc_count(netdev) < 1) { + adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; + adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; + } else { + adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; + } + + /* Set values in the private adapter struct */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == NIC_MAX_MCAST_LIST) + break; + ether_addr_copy(adapter->multicast_list[i++], ha->addr); + } + adapter->multicast_addr_count = i; + + /* Are the new flags different from the previous ones? If not, then no + * action is required + * + * NOTE - This block will always update the multicast_list with the + * hardware, even if the addresses aren't the same. 
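+ * (Clarifying illustration of the check below: only the packet_filter
+ * flags are compared, so the local multicast_list copied above is
+ * refreshed unconditionally, while the hardware filter registers are only
+ * rewritten, via et131x_set_packet_filter() and
+ * et1310_setup_device_for_multicast(), when those flags actually change.)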
+ */ + if (packet_filter != adapter->packet_filter) + et131x_set_packet_filter(adapter); +} + +static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* stop the queue if it's getting full */ + if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev)) + netif_stop_queue(netdev); + + /* Save the timestamp for the TX timeout watchdog */ + netdev->trans_start = jiffies; + + /* TCB is not available */ + if (tx_ring->used >= NUM_TCB) + goto drop_err; + + if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) || + !netif_carrier_ok(netdev)) + goto drop_err; + + if (send_packet(skb, adapter)) + goto drop_err; + + return NETDEV_TX_OK; + +drop_err: + dev_kfree_skb_any(skb); + adapter->netdev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +/* et131x_tx_timeout - Timeout handler + * + * The handler called when a Tx request times out. The timeout period is + * specified by the 'tx_timeo" element in the net_device structure (see + * et131x_alloc_device() to see how this value is set). + */ +static void et131x_tx_timeout(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct tx_ring *tx_ring = &adapter->tx_ring; + struct tcb *tcb; + unsigned long flags; + + /* If the device is closed, ignore the timeout */ + if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)) + return; + + /* Any nonrecoverable hardware error? + * Checks adapter->flags for any failure in phy reading + */ + if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR) + return; + + /* Hardware failure? */ + if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) { + dev_err(&adapter->pdev->dev, "hardware error - reset\n"); + return; + } + + /* Is send stuck? */ + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + tcb = tx_ring->send_head; + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + + if (tcb) { + tcb->count++; + + if (tcb->count > NIC_SEND_HANG_THRESHOLD) { + dev_warn(&adapter->pdev->dev, + "Send stuck - reset. 
tcb->WrIndex %x\n", + tcb->index); + + adapter->netdev->stats.tx_errors++; + + /* perform reset of tx/rx */ + et131x_disable_txrx(netdev); + et131x_enable_txrx(netdev); + } + } +} + +static int et131x_change_mtu(struct net_device *netdev, int new_mtu) +{ + int result = 0; + struct et131x_adapter *adapter = netdev_priv(netdev); + + if (new_mtu < 64 || new_mtu > 9216) + return -EINVAL; + + et131x_disable_txrx(netdev); + + netdev->mtu = new_mtu; + + et131x_adapter_memory_free(adapter); + + /* Set the config parameter for Jumbo Packet support */ + adapter->registry_jumbo_packet = new_mtu + 14; + et131x_soft_reset(adapter); + + result = et131x_adapter_memory_alloc(adapter); + if (result != 0) { + dev_warn(&adapter->pdev->dev, + "Change MTU failed; couldn't re-alloc DMA memory\n"); + return result; + } + + et131x_init_send(adapter); + et131x_hwaddr_init(adapter); + ether_addr_copy(netdev->dev_addr, adapter->addr); + + /* Init the device with the new settings */ + et131x_adapter_setup(adapter); + et131x_enable_txrx(netdev); + + return result; +} + +static const struct net_device_ops et131x_netdev_ops = { + .ndo_open = et131x_open, + .ndo_stop = et131x_close, + .ndo_start_xmit = et131x_tx, + .ndo_set_rx_mode = et131x_multicast, + .ndo_tx_timeout = et131x_tx_timeout, + .ndo_change_mtu = et131x_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats = et131x_stats, + .ndo_do_ioctl = et131x_ioctl, +}; + +static int et131x_pci_setup(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct et131x_adapter *adapter; + int rc; + int ii; + + rc = pci_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "pci_enable_device() failed\n"); + goto out; + } + + /* Perform some basic PCI checks */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Can't find PCI device's base address\n"); + rc = -ENODEV; + goto err_disable; + } + + rc = pci_request_regions(pdev, DRIVER_NAME); + if (rc < 0) { + dev_err(&pdev->dev, "Can't get PCI resources\n"); + goto err_disable; + } + + pci_set_master(pdev); + + /* Check the DMA addressing support of this device */ + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + dev_err(&pdev->dev, "No usable DMA addressing method\n"); + rc = -EIO; + goto err_release_res; + } + + netdev = alloc_etherdev(sizeof(struct et131x_adapter)); + if (!netdev) { + dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); + rc = -ENOMEM; + goto err_release_res; + } + + netdev->watchdog_timeo = ET131X_TX_TIMEOUT; + netdev->netdev_ops = &et131x_netdev_ops; + + SET_NETDEV_DEV(netdev, &pdev->dev); + netdev->ethtool_ops = &et131x_ethtool_ops; + + adapter = et131x_adapter_init(netdev, pdev); + + rc = et131x_pci_init(adapter, pdev); + if (rc < 0) + goto err_free_dev; + + /* Map the bus-relative registers to system virtual memory */ + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "Cannot map device registers\n"); + rc = -ENOMEM; + goto err_free_dev; + } + + /* If Phy COMA mode was enabled when we went down, disable it here. 
*/ + writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); + + et131x_soft_reset(adapter); + et131x_disable_interrupts(adapter); + + rc = et131x_adapter_memory_alloc(adapter); + if (rc < 0) { + dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n"); + goto err_iounmap; + } + + et131x_init_send(adapter); + + netif_napi_add(netdev, &adapter->napi, et131x_poll, 64); + + ether_addr_copy(netdev->dev_addr, adapter->addr); + + rc = -ENOMEM; + + adapter->mii_bus = mdiobus_alloc(); + if (!adapter->mii_bus) { + dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); + goto err_mem_free; + } + + adapter->mii_bus->name = "et131x_eth_mii"; + snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", + (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); + adapter->mii_bus->priv = netdev; + adapter->mii_bus->read = et131x_mdio_read; + adapter->mii_bus->write = et131x_mdio_write; + adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), + GFP_KERNEL); + if (!adapter->mii_bus->irq) + goto err_mdio_free; + + for (ii = 0; ii < PHY_MAX_ADDR; ii++) + adapter->mii_bus->irq[ii] = PHY_POLL; + + rc = mdiobus_register(adapter->mii_bus); + if (rc < 0) { + dev_err(&pdev->dev, "failed to register MII bus\n"); + goto err_mdio_free_irq; + } + + rc = et131x_mii_probe(netdev); + if (rc < 0) { + dev_err(&pdev->dev, "failed to probe MII bus\n"); + goto err_mdio_unregister; + } + + et131x_adapter_setup(adapter); + + /* Init variable for counting how long we do not have link status */ + adapter->boot_coma = 0; + et1310_disable_phy_coma(adapter); + + /* We can enable interrupts now + * + * NOTE - Because registration of interrupt handler is done in the + * device's open(), defer enabling device interrupts to that + * point + */ + + rc = register_netdev(netdev); + if (rc < 0) { + dev_err(&pdev->dev, "register_netdev() failed\n"); + goto err_phy_disconnect; + } + + /* Register the net_device struct with the PCI subsystem. Save a copy + * of the PCI config space for this device now that the device has + * been initialized, just in case it needs to be quickly restored. + */ + pci_set_drvdata(pdev, netdev); +out: + return rc; + +err_phy_disconnect: + phy_disconnect(adapter->phydev); +err_mdio_unregister: + mdiobus_unregister(adapter->mii_bus); +err_mdio_free_irq: + kfree(adapter->mii_bus->irq); +err_mdio_free: + mdiobus_free(adapter->mii_bus); +err_mem_free: + et131x_adapter_memory_free(adapter); +err_iounmap: + iounmap(adapter->regs); +err_free_dev: + pci_dev_put(pdev); + free_netdev(netdev); +err_release_res: + pci_release_regions(pdev); +err_disable: + pci_disable_device(pdev); + goto out; +} + +static const struct pci_device_id et131x_pci_table[] = { + { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, + { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, + { 0,} +}; +MODULE_DEVICE_TABLE(pci, et131x_pci_table); + +static struct pci_driver et131x_driver = { + .name = DRIVER_NAME, + .id_table = et131x_pci_table, + .probe = et131x_pci_setup, + .remove = et131x_pci_remove, + .driver.pm = &et131x_pm_ops, +}; + +module_pci_driver(et131x_driver); diff --git a/drivers/net/ethernet/agere/et131x.h b/drivers/net/ethernet/agere/et131x.h new file mode 100644 index 000000000000..be9a11c02526 --- /dev/null +++ b/drivers/net/ethernet/agere/et131x.h @@ -0,0 +1,1433 @@ +/* Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * http://www.agere.com + * + * SOFTWARE LICENSE + * + * This software is provided subject to the following terms and conditions, + * which you should read carefully before using the software. 
Using this + * software indicates your acceptance of these terms and conditions. If you do + * not agree with these terms and conditions, do not use the software. + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * + * Redistribution and use in source or binary forms, with or without + * modifications, are permitted provided that the following conditions are met: + * + * . Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following Disclaimer as comments in the code as + * well as in the documentation and/or other materials provided with the + * distribution. + * + * . Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following Disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * . Neither the name of Agere Systems Inc. nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * Disclaimer + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. 
+ * + */ + +#define DRIVER_NAME "et131x" +#define DRIVER_VERSION "v2.0" + +/* EEPROM registers */ + +/* LBCIF Register Groups (addressed via 32-bit offsets) */ +#define LBCIF_DWORD0_GROUP 0xAC +#define LBCIF_DWORD1_GROUP 0xB0 + +/* LBCIF Registers (addressed via 8-bit offsets) */ +#define LBCIF_ADDRESS_REGISTER 0xAC +#define LBCIF_DATA_REGISTER 0xB0 +#define LBCIF_CONTROL_REGISTER 0xB1 +#define LBCIF_STATUS_REGISTER 0xB2 + +/* LBCIF Control Register Bits */ +#define LBCIF_CONTROL_SEQUENTIAL_READ 0x01 +#define LBCIF_CONTROL_PAGE_WRITE 0x02 +#define LBCIF_CONTROL_EEPROM_RELOAD 0x08 +#define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20 +#define LBCIF_CONTROL_I2C_WRITE 0x40 +#define LBCIF_CONTROL_LBCIF_ENABLE 0x80 + +/* LBCIF Status Register Bits */ +#define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01 +#define LBCIF_STATUS_I2C_IDLE 0x02 +#define LBCIF_STATUS_ACK_ERROR 0x04 +#define LBCIF_STATUS_GENERAL_ERROR 0x08 +#define LBCIF_STATUS_CHECKSUM_ERROR 0x40 +#define LBCIF_STATUS_EEPROM_PRESENT 0x80 + +/* START OF GLOBAL REGISTER ADDRESS MAP */ +/* 10bit registers + * + * Tx queue start address reg in global address map at address 0x0000 + * tx queue end address reg in global address map at address 0x0004 + * rx queue start address reg in global address map at address 0x0008 + * rx queue end address reg in global address map at address 0x000C + */ + +/* structure for power management control status reg in global address map + * located at address 0x0010 + * jagcore_rx_rdy bit 9 + * jagcore_tx_rdy bit 8 + * phy_lped_en bit 7 + * phy_sw_coma bit 6 + * rxclk_gate bit 5 + * txclk_gate bit 4 + * sysclk_gate bit 3 + * jagcore_rx_en bit 2 + * jagcore_tx_en bit 1 + * gigephy_en bit 0 + */ +#define ET_PM_PHY_SW_COMA 0x40 +#define ET_PMCSR_INIT 0x38 + +/* Interrupt status reg at address 0x0018 + */ +#define ET_INTR_TXDMA_ISR 0x00000008 +#define ET_INTR_TXDMA_ERR 0x00000010 +#define ET_INTR_RXDMA_XFR_DONE 0x00000020 +#define ET_INTR_RXDMA_FB_R0_LOW 0x00000040 +#define ET_INTR_RXDMA_FB_R1_LOW 0x00000080 +#define ET_INTR_RXDMA_STAT_LOW 0x00000100 +#define ET_INTR_RXDMA_ERR 0x00000200 +#define ET_INTR_WATCHDOG 0x00004000 +#define ET_INTR_WOL 0x00008000 +#define ET_INTR_PHY 0x00010000 +#define ET_INTR_TXMAC 0x00020000 +#define ET_INTR_RXMAC 0x00040000 +#define ET_INTR_MAC_STAT 0x00080000 +#define ET_INTR_SLV_TIMEOUT 0x00100000 + +/* Interrupt mask register at address 0x001C + * Interrupt alias clear mask reg at address 0x0020 + * Interrupt status alias reg at address 0x0024 + * + * Same masks as above + */ + +/* Software reset reg at address 0x0028 + * 0: txdma_sw_reset + * 1: rxdma_sw_reset + * 2: txmac_sw_reset + * 3: rxmac_sw_reset + * 4: mac_sw_reset + * 5: mac_stat_sw_reset + * 6: mmc_sw_reset + *31: selfclr_disable + */ +#define ET_RESET_ALL 0x007F + +/* SLV Timer reg at address 0x002C (low 24 bits) + */ + +/* MSI Configuration reg at address 0x0030 + */ +#define ET_MSI_VECTOR 0x0000001F +#define ET_MSI_TC 0x00070000 + +/* Loopback reg located at address 0x0034 + */ +#define ET_LOOP_MAC 0x00000001 +#define ET_LOOP_DMA 0x00000002 + +/* GLOBAL Module of JAGCore Address Mapping + * Located at address 0x0000 + */ +struct global_regs { /* Location: */ + u32 txq_start_addr; /* 0x0000 */ + u32 txq_end_addr; /* 0x0004 */ + u32 rxq_start_addr; /* 0x0008 */ + u32 rxq_end_addr; /* 0x000C */ + u32 pm_csr; /* 0x0010 */ + u32 unused; /* 0x0014 */ + u32 int_status; /* 0x0018 */ + u32 int_mask; /* 0x001C */ + u32 int_alias_clr_en; /* 0x0020 */ + u32 int_status_alias; /* 0x0024 */ + u32 sw_reset; /* 0x0028 */ + u32 slv_timer; /* 0x002C */ + u32 
msi_config; /* 0x0030 */ + u32 loopback; /* 0x0034 */ + u32 watchdog_timer; /* 0x0038 */ +}; + +/* START OF TXDMA REGISTER ADDRESS MAP */ +/* txdma control status reg at address 0x1000 + */ +#define ET_TXDMA_CSR_HALT 0x00000001 +#define ET_TXDMA_DROP_TLP 0x00000002 +#define ET_TXDMA_CACHE_THRS 0x000000F0 +#define ET_TXDMA_CACHE_SHIFT 4 +#define ET_TXDMA_SNGL_EPKT 0x00000100 +#define ET_TXDMA_CLASS 0x00001E00 + +/* structure for txdma packet ring base address hi reg in txdma address map + * located at address 0x1004 + * Defined earlier (u32) + */ + +/* structure for txdma packet ring base address low reg in txdma address map + * located at address 0x1008 + * Defined earlier (u32) + */ + +/* structure for txdma packet ring number of descriptor reg in txdma address + * map. Located at address 0x100C + * + * 31-10: unused + * 9-0: pr ndes + */ +#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */ +#define ET_DMA12_WRAP 0x1000 +#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */ +#define ET_DMA10_WRAP 0x0400 +#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */ +#define ET_DMA4_WRAP 0x0010 + +#define INDEX12(x) ((x) & ET_DMA12_MASK) +#define INDEX10(x) ((x) & ET_DMA10_MASK) +#define INDEX4(x) ((x) & ET_DMA4_MASK) + +/* 10bit DMA with wrap + * txdma tx queue write address reg in txdma address map at 0x1010 + * txdma tx queue write address external reg in txdma address map at 0x1014 + * txdma tx queue read address reg in txdma address map at 0x1018 + * + * u32 + * txdma status writeback address hi reg in txdma address map at0x101C + * txdma status writeback address lo reg in txdma address map at 0x1020 + * + * 10bit DMA with wrap + * txdma service request reg in txdma address map at 0x1024 + * structure for txdma service complete reg in txdma address map at 0x1028 + * + * 4bit DMA with wrap + * txdma tx descriptor cache read index reg in txdma address map at 0x102C + * txdma tx descriptor cache write index reg in txdma address map at 0x1030 + * + * txdma error reg in txdma address map at address 0x1034 + * 0: PyldResend + * 1: PyldRewind + * 4: DescrResend + * 5: DescrRewind + * 8: WrbkResend + * 9: WrbkRewind + */ + +/* Tx DMA Module of JAGCore Address Mapping + * Located at address 0x1000 + */ +struct txdma_regs { /* Location: */ + u32 csr; /* 0x1000 */ + u32 pr_base_hi; /* 0x1004 */ + u32 pr_base_lo; /* 0x1008 */ + u32 pr_num_des; /* 0x100C */ + u32 txq_wr_addr; /* 0x1010 */ + u32 txq_wr_addr_ext; /* 0x1014 */ + u32 txq_rd_addr; /* 0x1018 */ + u32 dma_wb_base_hi; /* 0x101C */ + u32 dma_wb_base_lo; /* 0x1020 */ + u32 service_request; /* 0x1024 */ + u32 service_complete; /* 0x1028 */ + u32 cache_rd_index; /* 0x102C */ + u32 cache_wr_index; /* 0x1030 */ + u32 tx_dma_error; /* 0x1034 */ + u32 desc_abort_cnt; /* 0x1038 */ + u32 payload_abort_cnt; /* 0x103c */ + u32 writeback_abort_cnt; /* 0x1040 */ + u32 desc_timeout_cnt; /* 0x1044 */ + u32 payload_timeout_cnt; /* 0x1048 */ + u32 writeback_timeout_cnt; /* 0x104c */ + u32 desc_error_cnt; /* 0x1050 */ + u32 payload_error_cnt; /* 0x1054 */ + u32 writeback_error_cnt; /* 0x1058 */ + u32 dropped_tlp_cnt; /* 0x105c */ + u32 new_service_complete; /* 0x1060 */ + u32 ethernet_packet_cnt; /* 0x1064 */ +}; + +/* END OF TXDMA REGISTER ADDRESS MAP */ + +/* START OF RXDMA REGISTER ADDRESS MAP */ +/* structure for control status reg in rxdma address map + * Located at address 0x2000 + * + * CSR + * 0: halt + * 1-3: tc + * 4: fbr_big_endian + * 5: psr_big_endian + * 6: pkt_big_endian + * 7: dma_big_endian + * 8-9: fbr0_size + * 10: 
fbr0_enable + * 11-12: fbr1_size + * 13: fbr1_enable + * 14: unused + * 15: pkt_drop_disable + * 16: pkt_done_flush + * 17: halt_status + * 18-31: unused + */ +#define ET_RXDMA_CSR_HALT 0x0001 +#define ET_RXDMA_CSR_FBR0_SIZE_LO 0x0100 +#define ET_RXDMA_CSR_FBR0_SIZE_HI 0x0200 +#define ET_RXDMA_CSR_FBR0_ENABLE 0x0400 +#define ET_RXDMA_CSR_FBR1_SIZE_LO 0x0800 +#define ET_RXDMA_CSR_FBR1_SIZE_HI 0x1000 +#define ET_RXDMA_CSR_FBR1_ENABLE 0x2000 +#define ET_RXDMA_CSR_HALT_STATUS 0x00020000 + +/* structure for dma writeback lo reg in rxdma address map + * located at address 0x2004 + * Defined earlier (u32) + */ + +/* structure for dma writeback hi reg in rxdma address map + * located at address 0x2008 + * Defined earlier (u32) + */ + +/* structure for number of packets done reg in rxdma address map + * located at address 0x200C + * + * 31-8: unused + * 7-0: num done + */ + +/* structure for max packet time reg in rxdma address map + * located at address 0x2010 + * + * 31-18: unused + * 17-0: time done + */ + +/* structure for rx queue read address reg in rxdma address map + * located at address 0x2014 + * Defined earlier (u32) + */ + +/* structure for rx queue read address external reg in rxdma address map + * located at address 0x2018 + * Defined earlier (u32) + */ + +/* structure for rx queue write address reg in rxdma address map + * located at address 0x201C + * Defined earlier (u32) + */ + +/* structure for packet status ring base address lo reg in rxdma address map + * located at address 0x2020 + * Defined earlier (u32) + */ + +/* structure for packet status ring base address hi reg in rxdma address map + * located at address 0x2024 + * Defined earlier (u32) + */ + +/* structure for packet status ring number of descriptors reg in rxdma address + * map. Located at address 0x2028 + * + * 31-12: unused + * 11-0: psr ndes + */ +#define ET_RXDMA_PSR_NUM_DES_MASK 0xFFF + +/* structure for packet status ring available offset reg in rxdma address map + * located at address 0x202C + * + * 31-13: unused + * 12: psr avail wrap + * 11-0: psr avail + */ + +/* structure for packet status ring full offset reg in rxdma address map + * located at address 0x2030 + * + * 31-13: unused + * 12: psr full wrap + * 11-0: psr full + */ + +/* structure for packet status ring access index reg in rxdma address map + * located at address 0x2034 + * + * 31-5: unused + * 4-0: psr_ai + */ + +/* structure for packet status ring minimum descriptors reg in rxdma address + * map. Located at address 0x2038 + * + * 31-12: unused + * 11-0: psr_min + */ + +/* structure for free buffer ring base lo address reg in rxdma address map + * located at address 0x203C + * Defined earlier (u32) + */ + +/* structure for free buffer ring base hi address reg in rxdma address map + * located at address 0x2040 + * Defined earlier (u32) + */ + +/* structure for free buffer ring number of descriptors reg in rxdma address + * map. 
Located at address 0x2044 + * + * 31-10: unused + * 9-0: fbr ndesc + */ + +/* structure for free buffer ring 0 available offset reg in rxdma address map + * located at address 0x2048 + * Defined earlier (u32) + */ + +/* structure for free buffer ring 0 full offset reg in rxdma address map + * located at address 0x204C + * Defined earlier (u32) + */ + +/* structure for free buffer cache 0 full offset reg in rxdma address map + * located at address 0x2050 + * + * 31-5: unused + * 4-0: fbc rdi + */ + +/* structure for free buffer ring 0 minimum descriptor reg in rxdma address map + * located at address 0x2054 + * + * 31-10: unused + * 9-0: fbr min + */ + +/* structure for free buffer ring 1 base address lo reg in rxdma address map + * located at address 0x2058 - 0x205C + * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t) + */ + +/* structure for free buffer ring 1 number of descriptors reg in rxdma address + * map. Located at address 0x2060 + * Defined earlier (RXDMA_FBR_NUM_DES_t) + */ + +/* structure for free buffer ring 1 available offset reg in rxdma address map + * located at address 0x2064 + * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t) + */ + +/* structure for free buffer ring 1 full offset reg in rxdma address map + * located at address 0x2068 + * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t) + */ + +/* structure for free buffer cache 1 read index reg in rxdma address map + * located at address 0x206C + * Defined Earlier (RXDMA_FBC_RD_INDEX_t) + */ + +/* structure for free buffer ring 1 minimum descriptor reg in rxdma address map + * located at address 0x2070 + * Defined Earlier (RXDMA_FBR_MIN_DES_t) + */ + +/* Rx DMA Module of JAGCore Address Mapping + * Located at address 0x2000 + */ +struct rxdma_regs { /* Location: */ + u32 csr; /* 0x2000 */ + u32 dma_wb_base_lo; /* 0x2004 */ + u32 dma_wb_base_hi; /* 0x2008 */ + u32 num_pkt_done; /* 0x200C */ + u32 max_pkt_time; /* 0x2010 */ + u32 rxq_rd_addr; /* 0x2014 */ + u32 rxq_rd_addr_ext; /* 0x2018 */ + u32 rxq_wr_addr; /* 0x201C */ + u32 psr_base_lo; /* 0x2020 */ + u32 psr_base_hi; /* 0x2024 */ + u32 psr_num_des; /* 0x2028 */ + u32 psr_avail_offset; /* 0x202C */ + u32 psr_full_offset; /* 0x2030 */ + u32 psr_access_index; /* 0x2034 */ + u32 psr_min_des; /* 0x2038 */ + u32 fbr0_base_lo; /* 0x203C */ + u32 fbr0_base_hi; /* 0x2040 */ + u32 fbr0_num_des; /* 0x2044 */ + u32 fbr0_avail_offset; /* 0x2048 */ + u32 fbr0_full_offset; /* 0x204C */ + u32 fbr0_rd_index; /* 0x2050 */ + u32 fbr0_min_des; /* 0x2054 */ + u32 fbr1_base_lo; /* 0x2058 */ + u32 fbr1_base_hi; /* 0x205C */ + u32 fbr1_num_des; /* 0x2060 */ + u32 fbr1_avail_offset; /* 0x2064 */ + u32 fbr1_full_offset; /* 0x2068 */ + u32 fbr1_rd_index; /* 0x206C */ + u32 fbr1_min_des; /* 0x2070 */ +}; + +/* END OF RXDMA REGISTER ADDRESS MAP */ + +/* START OF TXMAC REGISTER ADDRESS MAP */ +/* structure for control reg in txmac address map + * located at address 0x3000 + * + * bits + * 31-8: unused + * 7: cklseg_disable + * 6: ckbcnt_disable + * 5: cksegnum + * 4: async_disable + * 3: fc_disable + * 2: mcif_disable + * 1: mif_disable + * 0: txmac_en + */ +#define ET_TX_CTRL_FC_DISABLE 0x0008 +#define ET_TX_CTRL_TXMAC_ENABLE 0x0001 + +/* structure for shadow pointer reg in txmac address map + * located at address 0x3004 + * 31-27: reserved + * 26-16: txq rd ptr + * 15-11: reserved + * 10-0: txq wr ptr + */ + +/* structure for error count reg in txmac address map + * located at address 0x3008 + * + * 31-12: unused + * 11-8: reserved + * 7-4: txq_underrun + * 3-0: fifo_underrun + */ + +/* 
structure for max fill reg in txmac address map + * located at address 0x300C + * 31-12: unused + * 11-0: max fill + */ + +/* structure for cf parameter reg in txmac address map + * located at address 0x3010 + * 31-16: cfep + * 15-0: cfpt + */ + +/* structure for tx test reg in txmac address map + * located at address 0x3014 + * 31-17: unused + * 16: reserved + * 15: txtest_en + * 14-11: unused + * 10-0: txq test pointer + */ + +/* structure for error reg in txmac address map + * located at address 0x3018 + * + * 31-9: unused + * 8: fifo_underrun + * 7-6: unused + * 5: ctrl2_err + * 4: txq_underrun + * 3: bcnt_err + * 2: lseg_err + * 1: segnum_err + * 0: seg0_err + */ + +/* structure for error interrupt reg in txmac address map + * located at address 0x301C + * + * 31-9: unused + * 8: fifo_underrun + * 7-6: unused + * 5: ctrl2_err + * 4: txq_underrun + * 3: bcnt_err + * 2: lseg_err + * 1: segnum_err + * 0: seg0_err + */ + +/* structure for error interrupt reg in txmac address map + * located at address 0x3020 + * + * 31-2: unused + * 1: bp_req + * 0: bp_xonxoff + */ + +/* Tx MAC Module of JAGCore Address Mapping + */ +struct txmac_regs { /* Location: */ + u32 ctl; /* 0x3000 */ + u32 shadow_ptr; /* 0x3004 */ + u32 err_cnt; /* 0x3008 */ + u32 max_fill; /* 0x300C */ + u32 cf_param; /* 0x3010 */ + u32 tx_test; /* 0x3014 */ + u32 err; /* 0x3018 */ + u32 err_int; /* 0x301C */ + u32 bp_ctrl; /* 0x3020 */ +}; + +/* END OF TXMAC REGISTER ADDRESS MAP */ + +/* START OF RXMAC REGISTER ADDRESS MAP */ + +/* structure for rxmac control reg in rxmac address map + * located at address 0x4000 + * + * 31-7: reserved + * 6: rxmac_int_disable + * 5: async_disable + * 4: mif_disable + * 3: wol_disable + * 2: pkt_filter_disable + * 1: mcif_disable + * 0: rxmac_en + */ +#define ET_RX_CTRL_WOL_DISABLE 0x0008 +#define ET_RX_CTRL_RXMAC_ENABLE 0x0001 + +/* structure for Wake On Lan Control and CRC 0 reg in rxmac address map + * located at address 0x4004 + * 31-16: crc + * 15-12: reserved + * 11: ignore_pp + * 10: ignore_mp + * 9: clr_intr + * 8: ignore_link_chg + * 7: ignore_uni + * 6: ignore_multi + * 5: ignore_broad + * 4-0: valid_crc 4-0 + */ + +/* structure for CRC 1 and CRC 2 reg in rxmac address map + * located at address 0x4008 + * + * 31-16: crc2 + * 15-0: crc1 + */ + +/* structure for CRC 3 and CRC 4 reg in rxmac address map + * located at address 0x400C + * + * 31-16: crc4 + * 15-0: crc3 + */ + +/* structure for Wake On Lan Source Address Lo reg in rxmac address map + * located at address 0x4010 + * + * 31-24: sa3 + * 23-16: sa4 + * 15-8: sa5 + * 7-0: sa6 + */ +#define ET_RX_WOL_LO_SA3_SHIFT 24 +#define ET_RX_WOL_LO_SA4_SHIFT 16 +#define ET_RX_WOL_LO_SA5_SHIFT 8 + +/* structure for Wake On Lan Source Address Hi reg in rxmac address map + * located at address 0x4014 + * + * 31-16: reserved + * 15-8: sa1 + * 7-0: sa2 + */ +#define ET_RX_WOL_HI_SA1_SHIFT 8 + +/* structure for Wake On Lan mask reg in rxmac address map + * located at address 0x4018 - 0x4064 + * Defined earlier (u32) + */ + +/* structure for Unicast Packet Filter Address 1 reg in rxmac address map + * located at address 0x4068 + * + * 31-24: addr1_3 + * 23-16: addr1_4 + * 15-8: addr1_5 + * 7-0: addr1_6 + */ +#define ET_RX_UNI_PF_ADDR1_3_SHIFT 24 +#define ET_RX_UNI_PF_ADDR1_4_SHIFT 16 +#define ET_RX_UNI_PF_ADDR1_5_SHIFT 8 + +/* structure for Unicast Packet Filter Address 2 reg in rxmac address map + * located at address 0x406C + * + * 31-24: addr2_3 + * 23-16: addr2_4 + * 15-8: addr2_5 + * 7-0: addr2_6 + */ +#define ET_RX_UNI_PF_ADDR2_3_SHIFT 24 
+#define ET_RX_UNI_PF_ADDR2_4_SHIFT 16 +#define ET_RX_UNI_PF_ADDR2_5_SHIFT 8 + +/* structure for Unicast Packet Filter Address 1 & 2 reg in rxmac address map + * located at address 0x4070 + * + * 31-24: addr2_1 + * 23-16: addr2_2 + * 15-8: addr1_1 + * 7-0: addr1_2 + */ +#define ET_RX_UNI_PF_ADDR2_1_SHIFT 24 +#define ET_RX_UNI_PF_ADDR2_2_SHIFT 16 +#define ET_RX_UNI_PF_ADDR1_1_SHIFT 8 + +/* structure for Multicast Hash reg in rxmac address map + * located at address 0x4074 - 0x4080 + * Defined earlier (u32) + */ + +/* structure for Packet Filter Control reg in rxmac address map + * located at address 0x4084 + * + * 31-23: unused + * 22-16: min_pkt_size + * 15-4: unused + * 3: filter_frag_en + * 2: filter_uni_en + * 1: filter_multi_en + * 0: filter_broad_en + */ +#define ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT 16 +#define ET_RX_PFCTRL_FRAG_FILTER_ENABLE 0x0008 +#define ET_RX_PFCTRL_UNICST_FILTER_ENABLE 0x0004 +#define ET_RX_PFCTRL_MLTCST_FILTER_ENABLE 0x0002 +#define ET_RX_PFCTRL_BRDCST_FILTER_ENABLE 0x0001 + +/* structure for Memory Controller Interface Control Max Segment reg in rxmac + * address map. Located at address 0x4088 + * + * 31-10: reserved + * 9-2: max_size + * 1: fc_en + * 0: seg_en + */ +#define ET_RX_MCIF_CTRL_MAX_SEG_SIZE_SHIFT 2 +#define ET_RX_MCIF_CTRL_MAX_SEG_FC_ENABLE 0x0002 +#define ET_RX_MCIF_CTRL_MAX_SEG_ENABLE 0x0001 + +/* structure for Memory Controller Interface Water Mark reg in rxmac address + * map. Located at address 0x408C + * + * 31-26: unused + * 25-16: mark_hi + * 15-10: unused + * 9-0: mark_lo + */ + +/* structure for Rx Queue Dialog reg in rxmac address map. + * located at address 0x4090 + * + * 31-26: reserved + * 25-16: rd_ptr + * 15-10: reserved + * 9-0: wr_ptr + */ + +/* structure for space available reg in rxmac address map. + * located at address 0x4094 + * + * 31-17: reserved + * 16: space_avail_en + * 15-10: reserved + * 9-0: space_avail + */ + +/* structure for management interface reg in rxmac address map. + * located at address 0x4098 + * + * 31-18: reserved + * 17: drop_pkt_en + * 16-0: drop_pkt_mask + */ + +/* structure for Error reg in rxmac address map. 
+ * located at address 0x409C + * + * 31-4: unused + * 3: mif + * 2: async + * 1: pkt_filter + * 0: mcif + */ + +/* Rx MAC Module of JAGCore Address Mapping + */ +struct rxmac_regs { /* Location: */ + u32 ctrl; /* 0x4000 */ + u32 crc0; /* 0x4004 */ + u32 crc12; /* 0x4008 */ + u32 crc34; /* 0x400C */ + u32 sa_lo; /* 0x4010 */ + u32 sa_hi; /* 0x4014 */ + u32 mask0_word0; /* 0x4018 */ + u32 mask0_word1; /* 0x401C */ + u32 mask0_word2; /* 0x4020 */ + u32 mask0_word3; /* 0x4024 */ + u32 mask1_word0; /* 0x4028 */ + u32 mask1_word1; /* 0x402C */ + u32 mask1_word2; /* 0x4030 */ + u32 mask1_word3; /* 0x4034 */ + u32 mask2_word0; /* 0x4038 */ + u32 mask2_word1; /* 0x403C */ + u32 mask2_word2; /* 0x4040 */ + u32 mask2_word3; /* 0x4044 */ + u32 mask3_word0; /* 0x4048 */ + u32 mask3_word1; /* 0x404C */ + u32 mask3_word2; /* 0x4050 */ + u32 mask3_word3; /* 0x4054 */ + u32 mask4_word0; /* 0x4058 */ + u32 mask4_word1; /* 0x405C */ + u32 mask4_word2; /* 0x4060 */ + u32 mask4_word3; /* 0x4064 */ + u32 uni_pf_addr1; /* 0x4068 */ + u32 uni_pf_addr2; /* 0x406C */ + u32 uni_pf_addr3; /* 0x4070 */ + u32 multi_hash1; /* 0x4074 */ + u32 multi_hash2; /* 0x4078 */ + u32 multi_hash3; /* 0x407C */ + u32 multi_hash4; /* 0x4080 */ + u32 pf_ctrl; /* 0x4084 */ + u32 mcif_ctrl_max_seg; /* 0x4088 */ + u32 mcif_water_mark; /* 0x408C */ + u32 rxq_diag; /* 0x4090 */ + u32 space_avail; /* 0x4094 */ + + u32 mif_ctrl; /* 0x4098 */ + u32 err_reg; /* 0x409C */ +}; + +/* END OF RXMAC REGISTER ADDRESS MAP */ + +/* START OF MAC REGISTER ADDRESS MAP */ +/* structure for configuration #1 reg in mac address map. + * located at address 0x5000 + * + * 31: soft reset + * 30: sim reset + * 29-20: reserved + * 19: reset rx mc + * 18: reset tx mc + * 17: reset rx func + * 16: reset tx fnc + * 15-9: reserved + * 8: loopback + * 7-6: reserved + * 5: rx flow + * 4: tx flow + * 3: syncd rx en + * 2: rx enable + * 1: syncd tx en + * 0: tx enable + */ +#define ET_MAC_CFG1_SOFT_RESET 0x80000000 +#define ET_MAC_CFG1_SIM_RESET 0x40000000 +#define ET_MAC_CFG1_RESET_RXMC 0x00080000 +#define ET_MAC_CFG1_RESET_TXMC 0x00040000 +#define ET_MAC_CFG1_RESET_RXFUNC 0x00020000 +#define ET_MAC_CFG1_RESET_TXFUNC 0x00010000 +#define ET_MAC_CFG1_LOOPBACK 0x00000100 +#define ET_MAC_CFG1_RX_FLOW 0x00000020 +#define ET_MAC_CFG1_TX_FLOW 0x00000010 +#define ET_MAC_CFG1_RX_ENABLE 0x00000004 +#define ET_MAC_CFG1_TX_ENABLE 0x00000001 +#define ET_MAC_CFG1_WAIT 0x0000000A /* RX & TX syncd */ + +/* structure for configuration #2 reg in mac address map. + * located at address 0x5004 + * 31-16: reserved + * 15-12: preamble + * 11-10: reserved + * 9-8: if mode + * 7-6: reserved + * 5: huge frame + * 4: length check + * 3: undefined + * 2: pad crc + * 1: crc enable + * 0: full duplex + */ +#define ET_MAC_CFG2_PREAMBLE_SHIFT 12 +#define ET_MAC_CFG2_IFMODE_MASK 0x0300 +#define ET_MAC_CFG2_IFMODE_1000 0x0200 +#define ET_MAC_CFG2_IFMODE_100 0x0100 +#define ET_MAC_CFG2_IFMODE_HUGE_FRAME 0x0020 +#define ET_MAC_CFG2_IFMODE_LEN_CHECK 0x0010 +#define ET_MAC_CFG2_IFMODE_PAD_CRC 0x0004 +#define ET_MAC_CFG2_IFMODE_CRC_ENABLE 0x0002 +#define ET_MAC_CFG2_IFMODE_FULL_DPLX 0x0001 + +/* structure for Interpacket gap reg in mac address map. + * located at address 0x5008 + * + * 31: reserved + * 30-24: non B2B ipg 1 + * 23: undefined + * 22-16: non B2B ipg 2 + * 15-8: Min ifg enforce + * 7-0: B2B ipg + * + * structure for half duplex reg in mac address map. 
+ * located at address 0x500C + * 31-24: reserved + * 23-20: Alt BEB trunc + * 19: Alt BEB enable + * 18: BP no backoff + * 17: no backoff + * 16: excess defer + * 15-12: re-xmit max + * 11-10: reserved + * 9-0: collision window + */ + +/* structure for Maximum Frame Length reg in mac address map. + * located at address 0x5010: bits 0-15 hold the length. + */ + +/* structure for Reserve 1 reg in mac address map. + * located at address 0x5014 - 0x5018 + * Defined earlier (u32) + */ + +/* structure for Test reg in mac address map. + * located at address 0x501C + * test: bits 0-2, rest unused + */ + +/* structure for MII Management Configuration reg in mac address map. + * located at address 0x5020 + * + * 31: reset MII mgmt + * 30-6: unused + * 5: scan auto increment + * 4: preamble suppress + * 3: undefined + * 2-0: mgmt clock reset + */ +#define ET_MAC_MIIMGMT_CLK_RST 0x0007 + +/* structure for MII Management Command reg in mac address map. + * located at address 0x5024 + * bit 1: scan cycle + * bit 0: read cycle + */ + +/* structure for MII Management Address reg in mac address map. + * located at address 0x5028 + * 31-13: reserved + * 12-8: phy addr + * 7-5: reserved + * 4-0: register + */ +#define ET_MAC_MII_ADDR(phy, reg) ((phy) << 8 | (reg)) + +/* structure for MII Management Control reg in mac address map. + * located at address 0x502C + * 31-16: reserved + * 15-0: phy control + */ + +/* structure for MII Management Status reg in mac address map. + * located at address 0x5030 + * 31-16: reserved + * 15-0: phy control + */ +#define ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK 0xFFFF + +/* structure for MII Management Indicators reg in mac address map. + * located at address 0x5034 + * 31-3: reserved + * 2: not valid + * 1: scanning + * 0: busy + */ +#define ET_MAC_MGMT_BUSY 0x00000001 /* busy */ +#define ET_MAC_MGMT_WAIT 0x00000005 /* busy | not valid */ + +/* structure for Interface Control reg in mac address map. + * located at address 0x5038 + * + * 31: reset if module + * 30-28: reserved + * 27: tbi mode + * 26: ghd mode + * 25: lhd mode + * 24: phy mode + * 23: reset per mii + * 22-17: reserved + * 16: speed + * 15: reset pe100x + * 14-11: reserved + * 10: force quiet + * 9: no cipher + * 8: disable link fail + * 7: reset gpsi + * 6-1: reserved + * 0: enable jabber protection + */ +#define ET_MAC_IFCTRL_GHDMODE (1 << 26) +#define ET_MAC_IFCTRL_PHYMODE (1 << 24) + +/* structure for Interface Status reg in mac address map. + * located at address 0x503C + * + * 31-10: reserved + * 9: excess_defer + * 8: clash + * 7: phy_jabber + * 6: phy_link_ok + * 5: phy_full_duplex + * 4: phy_speed + * 3: pe100x_link_fail + * 2: pe10t_loss_carrier + * 1: pe10t_sqe_error + * 0: pe10t_jabber + */ + +/* structure for Mac Station Address, Part 1 reg in mac address map. + * located at address 0x5040 + * + * 31-24: Octet6 + * 23-16: Octet5 + * 15-8: Octet4 + * 7-0: Octet3 + */ +#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24 +#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16 +#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8 + +/* structure for Mac Station Address, Part 2 reg in mac address map. 
+ * located at address 0x5044 + * + * 31-24: Octet2 + * 23-16: Octet1 + * 15-0: reserved + */ +#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24 +#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16 + +/* MAC Module of JAGCore Address Mapping + */ +struct mac_regs { /* Location: */ + u32 cfg1; /* 0x5000 */ + u32 cfg2; /* 0x5004 */ + u32 ipg; /* 0x5008 */ + u32 hfdp; /* 0x500C */ + u32 max_fm_len; /* 0x5010 */ + u32 rsv1; /* 0x5014 */ + u32 rsv2; /* 0x5018 */ + u32 mac_test; /* 0x501C */ + u32 mii_mgmt_cfg; /* 0x5020 */ + u32 mii_mgmt_cmd; /* 0x5024 */ + u32 mii_mgmt_addr; /* 0x5028 */ + u32 mii_mgmt_ctrl; /* 0x502C */ + u32 mii_mgmt_stat; /* 0x5030 */ + u32 mii_mgmt_indicator; /* 0x5034 */ + u32 if_ctrl; /* 0x5038 */ + u32 if_stat; /* 0x503C */ + u32 station_addr_1; /* 0x5040 */ + u32 station_addr_2; /* 0x5044 */ +}; + +/* END OF MAC REGISTER ADDRESS MAP */ + +/* START OF MAC STAT REGISTER ADDRESS MAP */ +/* structure for Carry Register One and it's Mask Register reg located in mac + * stat address map address 0x6130 and 0x6138. + * + * 31: tr64 + * 30: tr127 + * 29: tr255 + * 28: tr511 + * 27: tr1k + * 26: trmax + * 25: trmgv + * 24-17: unused + * 16: rbyt + * 15: rpkt + * 14: rfcs + * 13: rmca + * 12: rbca + * 11: rxcf + * 10: rxpf + * 9: rxuo + * 8: raln + * 7: rflr + * 6: rcde + * 5: rcse + * 4: rund + * 3: rovr + * 2: rfrg + * 1: rjbr + * 0: rdrp + */ + +/* structure for Carry Register Two Mask Register reg in mac stat address map. + * located at address 0x613C + * + * 31-20: unused + * 19: tjbr + * 18: tfcs + * 17: txcf + * 16: tovr + * 15: tund + * 14: trfg + * 13: tbyt + * 12: tpkt + * 11: tmca + * 10: tbca + * 9: txpf + * 8: tdfr + * 7: tedf + * 6: tscl + * 5: tmcl + * 4: tlcl + * 3: txcl + * 2: tncl + * 1: tpfh + * 0: tdrp + */ + +/* MAC STATS Module of JAGCore Address Mapping + */ +struct macstat_regs { /* Location: */ + u32 pad[32]; /* 0x6000 - 607C */ + + /* counters */ + u32 txrx_0_64_byte_frames; /* 0x6080 */ + u32 txrx_65_127_byte_frames; /* 0x6084 */ + u32 txrx_128_255_byte_frames; /* 0x6088 */ + u32 txrx_256_511_byte_frames; /* 0x608C */ + u32 txrx_512_1023_byte_frames; /* 0x6090 */ + u32 txrx_1024_1518_byte_frames; /* 0x6094 */ + u32 txrx_1519_1522_gvln_frames; /* 0x6098 */ + u32 rx_bytes; /* 0x609C */ + u32 rx_packets; /* 0x60A0 */ + u32 rx_fcs_errs; /* 0x60A4 */ + u32 rx_multicast_packets; /* 0x60A8 */ + u32 rx_broadcast_packets; /* 0x60AC */ + u32 rx_control_frames; /* 0x60B0 */ + u32 rx_pause_frames; /* 0x60B4 */ + u32 rx_unknown_opcodes; /* 0x60B8 */ + u32 rx_align_errs; /* 0x60BC */ + u32 rx_frame_len_errs; /* 0x60C0 */ + u32 rx_code_errs; /* 0x60C4 */ + u32 rx_carrier_sense_errs; /* 0x60C8 */ + u32 rx_undersize_packets; /* 0x60CC */ + u32 rx_oversize_packets; /* 0x60D0 */ + u32 rx_fragment_packets; /* 0x60D4 */ + u32 rx_jabbers; /* 0x60D8 */ + u32 rx_drops; /* 0x60DC */ + u32 tx_bytes; /* 0x60E0 */ + u32 tx_packets; /* 0x60E4 */ + u32 tx_multicast_packets; /* 0x60E8 */ + u32 tx_broadcast_packets; /* 0x60EC */ + u32 tx_pause_frames; /* 0x60F0 */ + u32 tx_deferred; /* 0x60F4 */ + u32 tx_excessive_deferred; /* 0x60F8 */ + u32 tx_single_collisions; /* 0x60FC */ + u32 tx_multiple_collisions; /* 0x6100 */ + u32 tx_late_collisions; /* 0x6104 */ + u32 tx_excessive_collisions; /* 0x6108 */ + u32 tx_total_collisions; /* 0x610C */ + u32 tx_pause_honored_frames; /* 0x6110 */ + u32 tx_drops; /* 0x6114 */ + u32 tx_jabbers; /* 0x6118 */ + u32 tx_fcs_errs; /* 0x611C */ + u32 tx_control_frames; /* 0x6120 */ + u32 tx_oversize_frames; /* 0x6124 */ + u32 tx_undersize_frames; /* 0x6128 */ + u32 
tx_fragments; /* 0x612C */ + u32 carry_reg1; /* 0x6130 */ + u32 carry_reg2; /* 0x6134 */ + u32 carry_reg1_mask; /* 0x6138 */ + u32 carry_reg2_mask; /* 0x613C */ +}; + +/* END OF MAC STAT REGISTER ADDRESS MAP */ + +/* START OF MMC REGISTER ADDRESS MAP */ +/* Main Memory Controller Control reg in mmc address map. + * located at address 0x7000 + */ +#define ET_MMC_ENABLE 1 +#define ET_MMC_ARB_DISABLE 2 +#define ET_MMC_RXMAC_DISABLE 4 +#define ET_MMC_TXMAC_DISABLE 8 +#define ET_MMC_TXDMA_DISABLE 16 +#define ET_MMC_RXDMA_DISABLE 32 +#define ET_MMC_FORCE_CE 64 + +/* Main Memory Controller Host Memory Access Address reg in mmc + * address map. Located at address 0x7004. Top 16 bits hold the address bits + */ +#define ET_SRAM_REQ_ACCESS 1 +#define ET_SRAM_WR_ACCESS 2 +#define ET_SRAM_IS_CTRL 4 + +/* structure for Main Memory Controller Host Memory Access Data reg in mmc + * address map. Located at address 0x7008 - 0x7014 + * Defined earlier (u32) + */ + +/* Memory Control Module of JAGCore Address Mapping + */ +struct mmc_regs { /* Location: */ + u32 mmc_ctrl; /* 0x7000 */ + u32 sram_access; /* 0x7004 */ + u32 sram_word1; /* 0x7008 */ + u32 sram_word2; /* 0x700C */ + u32 sram_word3; /* 0x7010 */ + u32 sram_word4; /* 0x7014 */ +}; + +/* END OF MMC REGISTER ADDRESS MAP */ + +/* JAGCore Address Mapping + */ +struct address_map { + struct global_regs global; + /* unused section of global address map */ + u8 unused_global[4096 - sizeof(struct global_regs)]; + struct txdma_regs txdma; + /* unused section of txdma address map */ + u8 unused_txdma[4096 - sizeof(struct txdma_regs)]; + struct rxdma_regs rxdma; + /* unused section of rxdma address map */ + u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)]; + struct txmac_regs txmac; + /* unused section of txmac address map */ + u8 unused_txmac[4096 - sizeof(struct txmac_regs)]; + struct rxmac_regs rxmac; + /* unused section of rxmac address map */ + u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)]; + struct mac_regs mac; + /* unused section of mac address map */ + u8 unused_mac[4096 - sizeof(struct mac_regs)]; + struct macstat_regs macstat; + /* unused section of mac stat address map */ + u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)]; + struct mmc_regs mmc; + /* unused section of mmc address map */ + u8 unused_mmc[4096 - sizeof(struct mmc_regs)]; + /* unused section of address map */ + u8 unused_[1015808]; + u8 unused_exp_rom[4096]; /* MGS-size TBD */ + u8 unused__[524288]; /* unused section of address map */ +}; + +/* Defines for generic MII registers 0x00 -> 0x0F can be found in + * include/linux/mii.h + */ +/* some defines for modem registers that seem to be 'reserved' */ +#define PHY_INDEX_REG 0x10 +#define PHY_DATA_REG 0x11 +#define PHY_MPHY_CONTROL_REG 0x12 + +/* defines for specified registers */ +#define PHY_LOOPBACK_CONTROL 0x13 /* TRU_VMI_LOOPBACK_CONTROL_1_REG 19 */ + /* TRU_VMI_LOOPBACK_CONTROL_2_REG 20 */ +#define PHY_REGISTER_MGMT_CONTROL 0x15 /* TRU_VMI_MI_SEQ_CONTROL_REG 21 */ +#define PHY_CONFIG 0x16 /* TRU_VMI_CONFIGURATION_REG 22 */ +#define PHY_PHY_CONTROL 0x17 /* TRU_VMI_PHY_CONTROL_REG 23 */ +#define PHY_INTERRUPT_MASK 0x18 /* TRU_VMI_INTERRUPT_MASK_REG 24 */ +#define PHY_INTERRUPT_STATUS 0x19 /* TRU_VMI_INTERRUPT_STATUS_REG 25 */ +#define PHY_PHY_STATUS 0x1A /* TRU_VMI_PHY_STATUS_REG 26 */ +#define PHY_LED_1 0x1B /* TRU_VMI_LED_CONTROL_1_REG 27 */ +#define PHY_LED_2 0x1C /* TRU_VMI_LED_CONTROL_2_REG 28 */ + /* TRU_VMI_LINK_CONTROL_REG 29 */ + /* TRU_VMI_TIMING_CONTROL_REG */ + +/* MI Register 10: Gigabit basic mode status 
reg(Reg 0x0A) */ +#define ET_1000BT_MSTR_SLV 0x4000 + +/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */ + +/* MI Register 19: Loopback Control Reg(0x13) + * 15: mii_en + * 14: pcs_en + * 13: pmd_en + * 12: all_digital_en + * 11: replica_en + * 10: line_driver_en + * 9-0: reserved + */ + +/* MI Register 20: Reserved Reg(0x14) */ + +/* MI Register 21: Management Interface Control Reg(0x15) + * 15-11: reserved + * 10-4: mi_error_count + * 3: reserved + * 2: ignore_10g_fr + * 1: reserved + * 0: preamble_suppress_en + */ + +/* MI Register 22: PHY Configuration Reg(0x16) + * 15: crs_tx_en + * 14: reserved + * 13-12: tx_fifo_depth + * 11-10: speed_downshift + * 9: pbi_detect + * 8: tbi_rate + * 7: alternate_np + * 6: group_mdio_en + * 5: tx_clock_en + * 4: sys_clock_en + * 3: reserved + * 2-0: mac_if_mode + */ +#define ET_PHY_CONFIG_TX_FIFO_DEPTH 0x3000 + +#define ET_PHY_CONFIG_FIFO_DEPTH_8 0x0000 +#define ET_PHY_CONFIG_FIFO_DEPTH_16 0x1000 +#define ET_PHY_CONFIG_FIFO_DEPTH_32 0x2000 +#define ET_PHY_CONFIG_FIFO_DEPTH_64 0x3000 + +/* MI Register 23: PHY CONTROL Reg(0x17) + * 15: reserved + * 14: tdr_en + * 13: reserved + * 12-11: downshift_attempts + * 10-6: reserved + * 5: jabber_10baseT + * 4: sqe_10baseT + * 3: tp_loopback_10baseT + * 2: preamble_gen_en + * 1: reserved + * 0: force_int + */ + +/* MI Register 24: Interrupt Mask Reg(0x18) + * 15-10: reserved + * 9: mdio_sync_lost + * 8: autoneg_status + * 7: hi_bit_err + * 6: np_rx + * 5: err_counter_full + * 4: fifo_over_underflow + * 3: rx_status + * 2: link_status + * 1: automatic_speed + * 0: int_en + */ + +/* MI Register 25: Interrupt Status Reg(0x19) + * 15-10: reserved + * 9: mdio_sync_lost + * 8: autoneg_status + * 7: hi_bit_err + * 6: np_rx + * 5: err_counter_full + * 4: fifo_over_underflow + * 3: rx_status + * 2: link_status + * 1: automatic_speed + * 0: int_en + */ + +/* MI Register 26: PHY Status Reg(0x1A) + * 15: reserved + * 14-13: autoneg_fault + * 12: autoneg_status + * 11: mdi_x_status + * 10: polarity_status + * 9-8: speed_status + * 7: duplex_status + * 6: link_status + * 5: tx_status + * 4: rx_status + * 3: collision_status + * 2: autoneg_en + * 1: pause_en + * 0: asymmetric_dir + */ +#define ET_PHY_AUTONEG_STATUS 0x1000 +#define ET_PHY_POLARITY_STATUS 0x0400 +#define ET_PHY_SPEED_STATUS 0x0300 +#define ET_PHY_DUPLEX_STATUS 0x0080 +#define ET_PHY_LSTATUS 0x0040 +#define ET_PHY_AUTONEG_ENABLE 0x0020 + +/* MI Register 27: LED Control Reg 1(0x1B) + * 15-14: reserved + * 13-12: led_dup_indicate + * 11-10: led_10baseT + * 9-8: led_collision + * 7-4: reserved + * 3-2: pulse_dur + * 1: pulse_stretch1 + * 0: pulse_stretch0 + */ + +/* MI Register 28: LED Control Reg 2(0x1C) + * 15-12: led_link + * 11-8: led_tx_rx + * 7-4: led_100BaseTX + * 3-0: led_1000BaseT + */ +#define ET_LED2_LED_LINK 0xF000 +#define ET_LED2_LED_TXRX 0x0F00 +#define ET_LED2_LED_100TX 0x00F0 +#define ET_LED2_LED_1000T 0x000F + +/* defines for LED control reg 2 values */ +#define LED_VAL_1000BT 0x0 +#define LED_VAL_100BTX 0x1 +#define LED_VAL_10BT 0x2 +#define LED_VAL_1000BT_100BTX 0x3 /* 1000BT on, 100BTX blink */ +#define LED_VAL_LINKON 0x4 +#define LED_VAL_TX 0x5 +#define LED_VAL_RX 0x6 +#define LED_VAL_TXRX 0x7 /* TX or RX */ +#define LED_VAL_DUPLEXFULL 0x8 +#define LED_VAL_COLLISION 0x9 +#define LED_VAL_LINKON_ACTIVE 0xA /* Link on, activity blink */ +#define LED_VAL_LINKON_RECV 0xB /* Link on, receive blink */ +#define LED_VAL_DUPLEXFULL_COLLISION 0xC /* Duplex on, collision blink */ +#define LED_VAL_BLINK 0xD +#define LED_VAL_ON 0xE +#define LED_VAL_OFF 0xF 
+ +#define LED_LINK_SHIFT 12 +#define LED_TXRX_SHIFT 8 +#define LED_100TX_SHIFT 4 + +/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1E) */ diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 35b494f5667f..ee256138b2c4 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -24,8 +24,6 @@ menuconfig STAGING if STAGING -source "drivers/staging/et131x/Kconfig" - source "drivers/staging/slicoss/Kconfig" source "drivers/staging/wlan-ng/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index e66a5dbd9b02..64bf31200c88 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -4,7 +4,6 @@ obj-$(CONFIG_STAGING) += staging.o obj-y += media/ -obj-$(CONFIG_ET131X) += et131x/ obj-$(CONFIG_SLICOSS) += slicoss/ obj-$(CONFIG_PRISM2_USB) += wlan-ng/ obj-$(CONFIG_COMEDI) += comedi/ -- cgit v1.2.3-59-g8ed1b From c3a803e81713c8d1f3a79afdc35014f3f9f07561 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 3 Oct 2014 14:45:00 -0700 Subject: Update Intel Ethernet Driver maintainers list I will no longer be working for Intel as of today. As such I am removing myself from the maintainers list and adding my replacement, Matthew Vick as he will be taking over maintenance of the fm10k driver. Signed-off-by: Alexander Duyck Acked-by: Jeff Kirsher Signed-off-by: David S. Miller --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f10ed3914ea8..646e2a20fec5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4789,14 +4789,14 @@ M: Deepak Saxena S: Maintained F: drivers/char/hw_random/ixp4xx-rng.c -INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) +INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) M: Jeff Kirsher M: Jesse Brandeburg M: Bruce Allan M: Carolyn Wyborny M: Don Skidmore M: Greg Rose -M: Alex Duyck +M: Matthew Vick M: John Ronciak M: Mitch Williams M: Linux NICS -- cgit v1.2.3-59-g8ed1b From 2b6d83e2b8b7de82331a6a1dcd64b51020a6031c Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Thu, 12 Jun 2014 22:31:19 +0530 Subject: mailbox: Introduce framework for mailbox Introduce common framework for client/protocol drivers and controller drivers of Inter-Processor-Communication (IPC). Client driver developers should have a look at include/linux/mailbox_client.h to understand the part of the API exposed to client drivers. 
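For orientation only, here is a minimal client-side sketch; it is not part of this patch, the function names, channel index, timeout and payload are hypothetical, and a device-tree platform with an "mboxes" property on the client node is assumed:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/printk.h>

/* Hypothetical client, for illustration only. */

/* Atomic callback: the framework hands over data pushed by the controller. */
static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	pr_info("demo: reply received\n");
}

static int demo_send_one(struct device *dev)
{
	struct mbox_client cl = {
		.dev          = dev,
		.rx_callback  = demo_rx_callback,
		.tx_block     = true,	/* block in mbox_send_message()... */
		.tx_tout      = 500,	/* ...for at most 500 ms (made-up value) */
		.knows_txdone = false,	/* controller reports TX-done itself */
	};
	u32 payload = 0x12345678;	/* message format is controller specific */
	struct mbox_chan *chan;
	int ret;

	/* Index 0 of this device's "mboxes" property (device tree assumed). */
	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* A negative return means the message was not queued or not sent. */
	ret = mbox_send_message(chan, &payload);

	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}

The same flow also works without blocking: clear tx_block and react in the client's tx_done callback instead, since the API buffers queued requests and calls tx_done from atomic context.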
Similarly controller driver developers should have a look at include/linux/mailbox_controller.h Reviewed-by: Mark Brown Signed-off-by: Jassi Brar --- MAINTAINERS | 8 + drivers/mailbox/Makefile | 4 + drivers/mailbox/mailbox.c | 465 +++++++++++++++++++++++++++++++++++++ include/linux/mailbox_client.h | 46 ++++ include/linux/mailbox_controller.h | 133 +++++++++++ 5 files changed, 656 insertions(+) create mode 100644 drivers/mailbox/mailbox.c create mode 100644 include/linux/mailbox_client.h create mode 100644 include/linux/mailbox_controller.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a12edf2624e5..c5f7c854b6d2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5740,6 +5740,14 @@ S: Maintained F: drivers/net/macvlan.c F: include/linux/if_macvlan.h +MAILBOX API +M: Jassi Brar +L: linux-kernel@vger.kernel.org +S: Maintained +F: drivers/mailbox/ +F: include/linux/mailbox_client.h +F: include/linux/mailbox_controller.h + MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 M: Michael Kerrisk W: http://www.kernel.org/doc/man-pages diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 6d184dbcaca8..94ed7cefb14d 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -1,3 +1,7 @@ +# Generic MAILBOX API + +obj-$(CONFIG_MAILBOX) += mailbox.o + obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c new file mode 100644 index 000000000000..afcb430508ec --- /dev/null +++ b/drivers/mailbox/mailbox.c @@ -0,0 +1,465 @@ +/* + * Mailbox: Common code for Mailbox controllers and users + * + * Copyright (C) 2013-2014 Linaro Ltd. + * Author: Jassi Brar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */ +#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */ +#define TXDONE_BY_ACK BIT(2) /* S/W ACK recevied by Client ticks the TX */ + +static LIST_HEAD(mbox_cons); +static DEFINE_MUTEX(con_mutex); + +static int add_to_rbuf(struct mbox_chan *chan, void *mssg) +{ + int idx; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + /* See if there is any space left */ + if (chan->msg_count == MBOX_TX_QUEUE_LEN) { + spin_unlock_irqrestore(&chan->lock, flags); + return -ENOBUFS; + } + + idx = chan->msg_free; + chan->msg_data[idx] = mssg; + chan->msg_count++; + + if (idx == MBOX_TX_QUEUE_LEN - 1) + chan->msg_free = 0; + else + chan->msg_free++; + + spin_unlock_irqrestore(&chan->lock, flags); + + return idx; +} + +static void msg_submit(struct mbox_chan *chan) +{ + unsigned count, idx; + unsigned long flags; + void *data; + int err; + + spin_lock_irqsave(&chan->lock, flags); + + if (!chan->msg_count || chan->active_req) + goto exit; + + count = chan->msg_count; + idx = chan->msg_free; + if (idx >= count) + idx -= count; + else + idx += MBOX_TX_QUEUE_LEN - count; + + data = chan->msg_data[idx]; + + /* Try to submit a message to the MBOX controller */ + err = chan->mbox->ops->send_data(chan, data); + if (!err) { + chan->active_req = data; + chan->msg_count--; + } +exit: + spin_unlock_irqrestore(&chan->lock, flags); +} + +static void tx_tick(struct mbox_chan *chan, int r) +{ + unsigned long flags; + void *mssg; + + spin_lock_irqsave(&chan->lock, flags); + mssg = chan->active_req; + chan->active_req = NULL; + spin_unlock_irqrestore(&chan->lock, flags); + + /* Submit next message */ + msg_submit(chan); + + /* Notify the client */ + if (mssg && chan->cl->tx_done) + chan->cl->tx_done(chan->cl, mssg, r); + + if (chan->cl->tx_block) + complete(&chan->tx_complete); +} + +static void poll_txdone(unsigned long data) +{ + struct mbox_controller *mbox = (struct mbox_controller *)data; + bool txdone, resched = false; + int i; + + for (i = 0; i < mbox->num_chans; i++) { + struct mbox_chan *chan = &mbox->chans[i]; + + if (chan->active_req && chan->cl) { + resched = true; + txdone = chan->mbox->ops->last_tx_done(chan); + if (txdone) + tx_tick(chan, 0); + } + } + + if (resched) + mod_timer(&mbox->poll, jiffies + + msecs_to_jiffies(mbox->txpoll_period)); +} + +/** + * mbox_chan_received_data - A way for controller driver to push data + * received from remote to the upper layer. + * @chan: Pointer to the mailbox channel on which RX happened. + * @mssg: Client specific message typecasted as void * + * + * After startup and before shutdown any data received on the chan + * is passed on to the API via atomic mbox_chan_received_data(). + * The controller should ACK the RX only after this call returns. + */ +void mbox_chan_received_data(struct mbox_chan *chan, void *mssg) +{ + /* No buffering the received data */ + if (chan->cl->rx_callback) + chan->cl->rx_callback(chan->cl, mssg); +} +EXPORT_SYMBOL_GPL(mbox_chan_received_data); + +/** + * mbox_chan_txdone - A way for controller driver to notify the + * framework that the last TX has completed. + * @chan: Pointer to the mailbox chan on which TX happened. + * @r: Status of last TX - OK or ERROR + * + * The controller that has IRQ for TX ACK calls this atomic API + * to tick the TX state machine. It works only if txdone_irq + * is set by the controller. 
+ */ +void mbox_chan_txdone(struct mbox_chan *chan, int r) +{ + if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) { + dev_err(chan->mbox->dev, + "Controller can't run the TX ticker\n"); + return; + } + + tx_tick(chan, r); +} +EXPORT_SYMBOL_GPL(mbox_chan_txdone); + +/** + * mbox_client_txdone - The way for a client to run the TX state machine. + * @chan: Mailbox channel assigned to this client. + * @r: Success status of last transmission. + * + * The client/protocol had received some 'ACK' packet and it notifies + * the API that the last packet was sent successfully. This only works + * if the controller can't sense TX-Done. + */ +void mbox_client_txdone(struct mbox_chan *chan, int r) +{ + if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) { + dev_err(chan->mbox->dev, "Client can't run the TX ticker\n"); + return; + } + + tx_tick(chan, r); +} +EXPORT_SYMBOL_GPL(mbox_client_txdone); + +/** + * mbox_client_peek_data - A way for client driver to pull data + * received from remote by the controller. + * @chan: Mailbox channel assigned to this client. + * + * A poke to controller driver for any received data. + * The data is actually passed onto client via the + * mbox_chan_received_data() + * The call can be made from atomic context, so the controller's + * implementation of peek_data() must not sleep. + * + * Return: True, if controller has, and is going to push after this, + * some data. + * False, if controller doesn't have any data to be read. + */ +bool mbox_client_peek_data(struct mbox_chan *chan) +{ + if (chan->mbox->ops->peek_data) + return chan->mbox->ops->peek_data(chan); + + return false; +} +EXPORT_SYMBOL_GPL(mbox_client_peek_data); + +/** + * mbox_send_message - For client to submit a message to be + * sent to the remote. + * @chan: Mailbox channel assigned to this client. + * @mssg: Client specific message typecasted. + * + * For client to submit data to the controller destined for a remote + * processor. If the client had set 'tx_block', the call will return + * either when the remote receives the data or when 'tx_tout' millisecs + * run out. + * In non-blocking mode, the requests are buffered by the API and a + * non-negative token is returned for each queued request. If the request + * is not queued, a negative token is returned. Upon failure or successful + * TX, the API calls 'tx_done' from atomic context, from which the client + * could submit yet another request. + * The pointer to message should be preserved until it is sent + * over the chan, i.e, tx_done() is made. + * This function could be called from atomic context as it simply + * queues the data and returns a token against the request. + * + * Return: Non-negative integer for successful submission (non-blocking mode) + * or transmission over chan (blocking mode). + * Negative value denotes failure. 
+ */ +int mbox_send_message(struct mbox_chan *chan, void *mssg) +{ + int t; + + if (!chan || !chan->cl) + return -EINVAL; + + t = add_to_rbuf(chan, mssg); + if (t < 0) { + dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n"); + return t; + } + + msg_submit(chan); + + if (chan->txdone_method == TXDONE_BY_POLL) + poll_txdone((unsigned long)chan->mbox); + + if (chan->cl->tx_block && chan->active_req) { + unsigned long wait; + int ret; + + if (!chan->cl->tx_tout) /* wait forever */ + wait = msecs_to_jiffies(3600000); + else + wait = msecs_to_jiffies(chan->cl->tx_tout); + + ret = wait_for_completion_timeout(&chan->tx_complete, wait); + if (ret == 0) { + t = -EIO; + tx_tick(chan, -EIO); + } + } + + return t; +} +EXPORT_SYMBOL_GPL(mbox_send_message); + +/** + * mbox_request_channel - Request a mailbox channel. + * @cl: Identity of the client requesting the channel. + * @index: Index of mailbox specifier in 'mboxes' property. + * + * The Client specifies its requirements and capabilities while asking for + * a mailbox channel. It can't be called from atomic context. + * The channel is exclusively allocated and can't be used by another + * client before the owner calls mbox_free_channel. + * After assignment, any packet received on this channel will be + * handed over to the client via the 'rx_callback'. + * The framework holds reference to the client, so the mbox_client + * structure shouldn't be modified until the mbox_free_channel returns. + * + * Return: Pointer to the channel assigned to the client if successful. + * ERR_PTR for request failure. + */ +struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index) +{ + struct device *dev = cl->dev; + struct mbox_controller *mbox; + struct of_phandle_args spec; + struct mbox_chan *chan; + unsigned long flags; + int ret; + + if (!dev || !dev->of_node) { + pr_debug("%s: No owner device node\n", __func__); + return ERR_PTR(-ENODEV); + } + + mutex_lock(&con_mutex); + + if (of_parse_phandle_with_args(dev->of_node, "mboxes", + "#mbox-cells", index, &spec)) { + dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__); + mutex_unlock(&con_mutex); + return ERR_PTR(-ENODEV); + } + + chan = NULL; + list_for_each_entry(mbox, &mbox_cons, node) + if (mbox->dev->of_node == spec.np) { + chan = mbox->of_xlate(mbox, &spec); + break; + } + + of_node_put(spec.np); + + if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) { + dev_dbg(dev, "%s: mailbox not free\n", __func__); + mutex_unlock(&con_mutex); + return ERR_PTR(-EBUSY); + } + + spin_lock_irqsave(&chan->lock, flags); + chan->msg_free = 0; + chan->msg_count = 0; + chan->active_req = NULL; + chan->cl = cl; + init_completion(&chan->tx_complete); + + if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) + chan->txdone_method |= TXDONE_BY_ACK; + + spin_unlock_irqrestore(&chan->lock, flags); + + ret = chan->mbox->ops->startup(chan); + if (ret) { + dev_err(dev, "Unable to startup the chan (%d)\n", ret); + mbox_free_channel(chan); + chan = ERR_PTR(ret); + } + + mutex_unlock(&con_mutex); + return chan; +} +EXPORT_SYMBOL_GPL(mbox_request_channel); + +/** + * mbox_free_channel - The client relinquishes control of a mailbox + * channel by this call. + * @chan: The mailbox channel to be freed. 
+ */ +void mbox_free_channel(struct mbox_chan *chan) +{ + unsigned long flags; + + if (!chan || !chan->cl) + return; + + chan->mbox->ops->shutdown(chan); + + /* The queued TX requests are simply aborted, no callbacks are made */ + spin_lock_irqsave(&chan->lock, flags); + chan->cl = NULL; + chan->active_req = NULL; + if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) + chan->txdone_method = TXDONE_BY_POLL; + + module_put(chan->mbox->dev->driver->owner); + spin_unlock_irqrestore(&chan->lock, flags); +} +EXPORT_SYMBOL_GPL(mbox_free_channel); + +static struct mbox_chan * +of_mbox_index_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + int ind = sp->args[0]; + + if (ind >= mbox->num_chans) + return NULL; + + return &mbox->chans[ind]; +} + +/** + * mbox_controller_register - Register the mailbox controller + * @mbox: Pointer to the mailbox controller. + * + * The controller driver registers its communication channels + */ +int mbox_controller_register(struct mbox_controller *mbox) +{ + int i, txdone; + + /* Sanity check */ + if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans) + return -EINVAL; + + if (mbox->txdone_irq) + txdone = TXDONE_BY_IRQ; + else if (mbox->txdone_poll) + txdone = TXDONE_BY_POLL; + else /* It has to be ACK then */ + txdone = TXDONE_BY_ACK; + + if (txdone == TXDONE_BY_POLL) { + mbox->poll.function = &poll_txdone; + mbox->poll.data = (unsigned long)mbox; + init_timer(&mbox->poll); + } + + for (i = 0; i < mbox->num_chans; i++) { + struct mbox_chan *chan = &mbox->chans[i]; + + chan->cl = NULL; + chan->mbox = mbox; + chan->txdone_method = txdone; + spin_lock_init(&chan->lock); + } + + if (!mbox->of_xlate) + mbox->of_xlate = of_mbox_index_xlate; + + mutex_lock(&con_mutex); + list_add_tail(&mbox->node, &mbox_cons); + mutex_unlock(&con_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mbox_controller_register); + +/** + * mbox_controller_unregister - Unregister the mailbox controller + * @mbox: Pointer to the mailbox controller. + */ +void mbox_controller_unregister(struct mbox_controller *mbox) +{ + int i; + + if (!mbox) + return; + + mutex_lock(&con_mutex); + + list_del(&mbox->node); + + for (i = 0; i < mbox->num_chans; i++) + mbox_free_channel(&mbox->chans[i]); + + if (mbox->txdone_poll) + del_timer_sync(&mbox->poll); + + mutex_unlock(&con_mutex); +} +EXPORT_SYMBOL_GPL(mbox_controller_unregister); diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h new file mode 100644 index 000000000000..307d9cab2026 --- /dev/null +++ b/include/linux/mailbox_client.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013-2014 Linaro Ltd. + * Author: Jassi Brar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAILBOX_CLIENT_H +#define __MAILBOX_CLIENT_H + +#include +#include + +struct mbox_chan; + +/** + * struct mbox_client - User of a mailbox + * @dev: The client device + * @tx_block: If the mbox_send_message should block until data is + * transmitted. + * @tx_tout: Max block period in ms before TX is assumed failure + * @knows_txdone: If the client could run the TX state machine. Usually + * if the client receives some ACK packet for transmission. + * Unused if the controller already has TX_Done/RTR IRQ. 
+ * @rx_callback: Atomic callback to provide client the data received + * @tx_done: Atomic callback to tell client of data transmission + */ +struct mbox_client { + struct device *dev; + bool tx_block; + unsigned long tx_tout; + bool knows_txdone; + + void (*rx_callback)(struct mbox_client *cl, void *mssg); + void (*tx_done)(struct mbox_client *cl, void *mssg, int r); +}; + +struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); +int mbox_send_message(struct mbox_chan *chan, void *mssg); +void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ +bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ +void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ + +#endif /* __MAILBOX_CLIENT_H */ diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h new file mode 100644 index 000000000000..d4cf96f07cfc --- /dev/null +++ b/include/linux/mailbox_controller.h @@ -0,0 +1,133 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAILBOX_CONTROLLER_H +#define __MAILBOX_CONTROLLER_H + +#include +#include +#include +#include +#include + +struct mbox_chan; + +/** + * struct mbox_chan_ops - methods to control mailbox channels + * @send_data: The API asks the MBOX controller driver, in atomic + * context try to transmit a message on the bus. Returns 0 if + * data is accepted for transmission, -EBUSY while rejecting + * if the remote hasn't yet read the last data sent. Actual + * transmission of data is reported by the controller via + * mbox_chan_txdone (if it has some TX ACK irq). It must not + * sleep. + * @startup: Called when a client requests the chan. The controller + * could ask clients for additional parameters of communication + * to be provided via client's chan_data. This call may + * block. After this call the Controller must forward any + * data received on the chan by calling mbox_chan_received_data. + * The controller may do stuff that need to sleep. + * @shutdown: Called when a client relinquishes control of a chan. + * This call may block too. The controller must not forward + * any received data anymore. + * The controller may do stuff that need to sleep. + * @last_tx_done: If the controller sets 'txdone_poll', the API calls + * this to poll status of last TX. The controller must + * give priority to IRQ method over polling and never + * set both txdone_poll and txdone_irq. Only in polling + * mode 'send_data' is expected to return -EBUSY. + * The controller may do stuff that need to sleep/block. + * Used only if txdone_poll:=true && txdone_irq:=false + * @peek_data: Atomic check for any received data. Return true if controller + * has some data to push to the client. False otherwise. + */ +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *chan, void *data); + int (*startup)(struct mbox_chan *chan); + void (*shutdown)(struct mbox_chan *chan); + bool (*last_tx_done)(struct mbox_chan *chan); + bool (*peek_data)(struct mbox_chan *chan); +}; + +/** + * struct mbox_controller - Controller of a class of communication channels + * @dev: Device backing this controller + * @ops: Operators that work on each communication chan + * @chans: Array of channels + * @num_chans: Number of channels in the 'chans' array. + * @txdone_irq: Indicates if the controller can report to API when + * the last transmitted data was read by the remote. 
+ * Eg, if it has some TX ACK irq. + * @txdone_poll: If the controller can read but not report the TX + * done. Ex, some register shows the TX status but + * no interrupt rises. Ignored if 'txdone_irq' is set. + * @txpoll_period: If 'txdone_poll' is in effect, the API polls for + * last TX's status after these many millisecs + * @of_xlate: Controller driver specific mapping of channel via DT + * @poll: API private. Used to poll for TXDONE on all channels. + * @node: API private. To hook into list of controllers. + */ +struct mbox_controller { + struct device *dev; + struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned txpoll_period; + struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox, + const struct of_phandle_args *sp); + /* Internal to API */ + struct timer_list poll; + struct list_head node; +}; + +/* + * The length of circular buffer for queuing messages from a client. + * 'msg_count' tracks the number of buffered messages while 'msg_free' + * is the index where the next message would be buffered. + * We shouldn't need it too big because every transfer is interrupt + * triggered and if we have lots of data to transfer, the interrupt + * latencies are going to be the bottleneck, not the buffer length. + * Besides, mbox_send_message could be called from atomic context and + * the client could also queue another message from the notifier 'tx_done' + * of the last transfer done. + * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN" + * print, it needs to be taken from config option or somesuch. + */ +#define MBOX_TX_QUEUE_LEN 20 + +/** + * struct mbox_chan - s/w representation of a communication chan + * @mbox: Pointer to the parent/provider of this channel + * @txdone_method: Way to detect TXDone chosen by the API + * @cl: Pointer to the current owner of this channel + * @tx_complete: Transmission completion + * @active_req: Currently active request hook + * @msg_count: No. of mssg currently queued + * @msg_free: Index of next available mssg slot + * @msg_data: Hook for data packet + * @lock: Serialise access to the channel + * @con_priv: Hook for controller driver to attach private data + */ +struct mbox_chan { + struct mbox_controller *mbox; + unsigned txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned msg_count, msg_free; + void *msg_data[MBOX_TX_QUEUE_LEN]; + spinlock_t lock; /* Serialise access to the channel */ + void *con_priv; +}; + +int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */ +void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */ +void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */ +void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */ + +#endif /* __MAILBOX_CONTROLLER_H */ -- cgit v1.2.3-59-g8ed1b From a9282d01cf357379ce29103cec5e7651a53c634d Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Wed, 8 Oct 2014 19:55:05 +1100 Subject: cxl: Add documentation for userspace APIs This documentation gives an overview of the hardware architecture, userspace APIs via /dev/cxl/afuM.N and the syfs files. It also adds a MAINTAINERS file entry for cxl. 
Signed-off-by: Ian Munsie Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman --- Documentation/ABI/testing/sysfs-class-cxl | 129 ++++++++++ Documentation/ioctl/ioctl-number.txt | 1 + Documentation/powerpc/00-INDEX | 2 + Documentation/powerpc/cxl.txt | 379 ++++++++++++++++++++++++++++++ MAINTAINERS | 12 + include/uapi/misc/cxl.h | 7 +- 6 files changed, 527 insertions(+), 3 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-class-cxl create mode 100644 Documentation/powerpc/cxl.txt (limited to 'MAINTAINERS') diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl new file mode 100644 index 000000000000..554405ec1955 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-cxl @@ -0,0 +1,129 @@ +Slave contexts (eg. /sys/class/cxl/afu0.0s): + +What: /sys/class/cxl//irqs_max +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read/write + Decimal value of maximum number of interrupts that can be + requested by userspace. The default on probe is the maximum + that hardware can support (eg. 2037). Write values will limit + userspace applications to that many userspace interrupts. Must + be >= irqs_min. + +What: /sys/class/cxl//irqs_min +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Decimal value of the minimum number of interrupts that + userspace must request on a CXL_START_WORK ioctl. Userspace may + omit the num_interrupts field in the START_WORK IOCTL to get + this minimum automatically. + +What: /sys/class/cxl//mmio_size +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Decimal value of the size of the MMIO space that may be mmaped + by userspace. + +What: /sys/class/cxl//modes_supported +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + List of the modes this AFU supports. One per line. + Valid entries are: "dedicated_process" and "afu_directed" + +What: /sys/class/cxl//mode +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read/write + The current mode the AFU is using. Will be one of the modes + given in modes_supported. Writing will change the mode + provided that no user contexts are attached. + + +What: /sys/class/cxl//prefault_mode +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read/write + Set the mode for prefaulting in segments into the segment table + when performing the START_WORK ioctl. Possible values: + none: No prefaulting (default) + work_element_descriptor: Treat the work element + descriptor as an effective address and + prefault what it points to. + all: all segments process calling START_WORK maps. + +What: /sys/class/cxl//reset +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: write only + Writing 1 here will reset the AFU provided there are not + contexts active on the AFU. + +What: /sys/class/cxl//api_version +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Decimal value of the current version of the kernel/user API. + +What: /sys/class/cxl//api_version_com +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Decimal value of the the lowest version of the userspace API + this this kernel supports. + + + +Master contexts (eg. 
/sys/class/cxl/afu0.0m) + +What: /sys/class/cxl/m/mmio_size +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Decimal value of the size of the MMIO space that may be mmaped + by userspace. This includes all slave contexts space also. + +What: /sys/class/cxl/m/pp_mmio_len +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Decimal value of the Per Process MMIO space length. + +What: /sys/class/cxl/m/pp_mmio_off +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Decimal value of the Per Process MMIO space offset. + + +Card info (eg. /sys/class/cxl/card0) + +What: /sys/class/cxl//caia_version +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Identifies the CAIA Version the card implements. + +What: /sys/class/cxl//psl_version +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Identifies the revision level of the PSL. + +What: /sys/class/cxl//base_image +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Identifies the revision level of the base image for devices + that support loadable PSLs. For FPGAs this field identifies + the image contained in the on-adapter flash which is loaded + during the initial program load. + +What: /sys/class/cxl//image_loaded +Date: September 2014 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read only + Will return "user" or "factory" depending on the image loaded + onto the card. diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 7e240a7c9ab1..8136e1fd30fd 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -313,6 +313,7 @@ Code Seq#(hex) Include File Comments 0xB1 00-1F PPPoX 0xB3 00 linux/mmc/ioctl.h 0xC0 00-0F linux/usb/iowarrior.h +0xCA 00-0F uapi/misc/cxl.h 0xCB 00-1F CBM serial IEC bus in development: 0xCD 01 linux/reiserfs_fs.h diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX index a68784d0a1ee..6fd0e8bb8140 100644 --- a/Documentation/powerpc/00-INDEX +++ b/Documentation/powerpc/00-INDEX @@ -11,6 +11,8 @@ bootwrapper.txt cpu_features.txt - info on how we support a variety of CPUs with minimal compile-time options. +cxl.txt + - Overview of the CXL driver. eeh-pci-error-recovery.txt - info on PCI Bus EEH Error Recovery firmware-assisted-dump.txt diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.txt new file mode 100644 index 000000000000..2c71ecc519d9 --- /dev/null +++ b/Documentation/powerpc/cxl.txt @@ -0,0 +1,379 @@ +Coherent Accelerator Interface (CXL) +==================================== + +Introduction +============ + + The coherent accelerator interface is designed to allow the + coherent connection of accelerators (FPGAs and other devices) to a + POWER system. These devices need to adhere to the Coherent + Accelerator Interface Architecture (CAIA). + + IBM refers to this as the Coherent Accelerator Processor Interface + or CAPI. In the kernel it's referred to by the name CXL to avoid + confusion with the ISDN CAPI subsystem. + + Coherent in this context means that the accelerator and CPUs can + both access system memory directly and with the same effective + addresses. 
+ + +Hardware overview +================= + + POWER8 FPGA + +----------+ +---------+ + | | | | + | CPU | | AFU | + | | | | + | | | | + | | | | + +----------+ +---------+ + | PHB | | | + | +------+ | PSL | + | | CAPP |<------>| | + +---+------+ PCIE +---------+ + + The POWER8 chip has a Coherently Attached Processor Proxy (CAPP) + unit which is part of the PCIe Host Bridge (PHB). This is managed + by Linux by calls into OPAL. Linux doesn't directly program the + CAPP. + + The FPGA (or coherently attached device) consists of two parts. + The POWER Service Layer (PSL) and the Accelerator Function Unit + (AFU). The AFU is used to implement specific functionality behind + the PSL. The PSL, among other things, provides memory address + translation services to allow each AFU direct access to userspace + memory. + + The AFU is the core part of the accelerator (eg. the compression, + crypto etc function). The kernel has no knowledge of the function + of the AFU. Only userspace interacts directly with the AFU. + + The PSL provides the translation and interrupt services that the + AFU needs. This is what the kernel interacts with. For example, if + the AFU needs to read a particular effective address, it sends + that address to the PSL, the PSL then translates it, fetches the + data from memory and returns it to the AFU. If the PSL has a + translation miss, it interrupts the kernel and the kernel services + the fault. The context to which this fault is serviced is based on + who owns that acceleration function. + + +AFU Modes +========= + + There are two programming modes supported by the AFU. Dedicated + and AFU directed. AFU may support one or both modes. + + When using dedicated mode only one MMU context is supported. In + this mode, only one userspace process can use the accelerator at + time. + + When using AFU directed mode, up to 16K simultaneous contexts can + be supported. This means up to 16K simultaneous userspace + applications may use the accelerator (although specific AFUs may + support fewer). In this mode, the AFU sends a 16 bit context ID + with each of its requests. This tells the PSL which context is + associated with each operation. If the PSL can't translate an + operation, the ID can also be accessed by the kernel so it can + determine the userspace context associated with an operation. + + +MMIO space +========== + + A portion of the accelerator MMIO space can be directly mapped + from the AFU to userspace. Either the whole space can be mapped or + just a per context portion. The hardware is self describing, hence + the kernel can determine the offset and size of the per context + portion. + + +Interrupts +========== + + AFUs may generate interrupts that are destined for userspace. These + are received by the kernel as hardware interrupts and passed onto + userspace by a read syscall documented below. + + Data storage faults and error interrupts are handled by the kernel + driver. + + +Work Element Descriptor (WED) +============================= + + The WED is a 64-bit parameter passed to the AFU when a context is + started. Its format is up to the AFU hence the kernel has no + knowledge of what it represents. Typically it will be the + effective address of a work queue or status block where the AFU + and userspace can share control and status information. + + + + +User API +======== + + For AFUs operating in AFU directed mode, two character device + files will be created. 
/dev/cxl/afu0.0m will correspond to a + master context and /dev/cxl/afu0.0s will correspond to a slave + context. Master contexts have access to the full MMIO space an + AFU provides. Slave contexts have access to only the per process + MMIO space an AFU provides. + + For AFUs operating in dedicated process mode, the driver will + only create a single character device per AFU called + /dev/cxl/afu0.0d. This will have access to the entire MMIO space + that the AFU provides (like master contexts in AFU directed). + + The types described below are defined in include/uapi/misc/cxl.h + + The following file operations are supported on both slave and + master devices. + + +open +---- + + Opens the device and allocates a file descriptor to be used with + the rest of the API. + + A dedicated mode AFU only has one context and only allows the + device to be opened once. + + An AFU directed mode AFU can have many contexts, the device can be + opened once for each context that is available. + + When all available contexts are allocated the open call will fail + and return -ENOSPC. + + Note: IRQs need to be allocated for each context, which may limit + the number of contexts that can be created, and therefore + how many times the device can be opened. The POWER8 CAPP + supports 2040 IRQs and 3 are used by the kernel, so 2037 are + left. If 1 IRQ is needed per context, then only 2037 + contexts can be allocated. If 4 IRQs are needed per context, + then only 2037/4 = 509 contexts can be allocated. + + +ioctl +----- + + CXL_IOCTL_START_WORK: + Starts the AFU context and associates it with the current + process. Once this ioctl is successfully executed, all memory + mapped into this process is accessible to this AFU context + using the same effective addresses. No additional calls are + required to map/unmap memory. The AFU memory context will be + updated as userspace allocates and frees memory. This ioctl + returns once the AFU context is started. + + Takes a pointer to a struct cxl_ioctl_start_work: + + struct cxl_ioctl_start_work { + __u64 flags; + __u64 work_element_descriptor; + __u64 amr; + __s16 num_interrupts; + __s16 reserved1; + __s32 reserved2; + __u64 reserved3; + __u64 reserved4; + __u64 reserved5; + __u64 reserved6; + }; + + flags: + Indicates which optional fields in the structure are + valid. + + work_element_descriptor: + The Work Element Descriptor (WED) is a 64-bit argument + defined by the AFU. Typically this is an effective + address pointing to an AFU specific structure + describing what work to perform. + + amr: + Authority Mask Register (AMR), same as the powerpc + AMR. This field is only used by the kernel when the + corresponding CXL_START_WORK_AMR value is specified in + flags. If not specified the kernel will use a default + value of 0. + + num_interrupts: + Number of userspace interrupts to request. This field + is only used by the kernel when the corresponding + CXL_START_WORK_NUM_IRQS value is specified in flags. + If not specified the minimum number required by the + AFU will be allocated. The min and max number can be + obtained from sysfs. + + reserved fields: + For ABI padding and future extensions + + CXL_IOCTL_GET_PROCESS_ELEMENT: + Get the current context id, also known as the process element. + The value is returned from the kernel as a __u32. + + +mmap +---- + + An AFU may have an MMIO space to facilitate communication with the + AFU. If it does, the MMIO space can be accessed via mmap. The size + and contents of this area are specific to the particular AFU. 
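To make the open/ioctl sequence above concrete, a minimal hypothetical userspace sketch for a dedicated-mode AFU follows. It assumes the uapi header is installed as <misc/cxl.h>, uses a placeholder WED of 0, and walks read() events by header.size as described in the read section further below; it illustrates the documented flow and is not code from this patch.

/* Hypothetical userspace sketch -- not part of this patch. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/cxl.h>		/* assumed install path of the uapi header */

int main(void)
{
	struct cxl_ioctl_start_work work;
	char buf[4096];			/* read() buffer must be >= 4K */
	ssize_t len;
	int fd;

	fd = open("/dev/cxl/afu0.0d", O_RDWR);	/* dedicated-mode chardev */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&work, 0, sizeof(work));
	work.work_element_descriptor = 0;	/* AFU-defined; placeholder */
	work.flags = 0;				/* no optional fields used */

	if (ioctl(fd, CXL_IOCTL_START_WORK, &work)) {
		perror("CXL_IOCTL_START_WORK");
		close(fd);
		return 1;
	}

	/*
	 * After START_WORK the AFU shares this process's address space;
	 * MMIO could now be mapped with mmap() and AFU events consumed
	 * with read(), advancing by header.size per event.
	 */
	len = read(fd, buf, sizeof(buf));
	if (len > 0) {
		size_t off = 0;

		while (off + sizeof(struct cxl_event_header) <= (size_t)len) {
			struct cxl_event *ev = (struct cxl_event *)(buf + off);

			printf("event type %u for context %u\n",
			       ev->header.type, ev->header.process_element);
			if (ev->header.size < sizeof(ev->header))
				break;
			off += ev->header.size;
		}
	}

	close(fd);
	return 0;
}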
The + size can be discovered via sysfs. + + In AFU directed mode, master contexts are allowed to map all of + the MMIO space and slave contexts are allowed to only map the per + process MMIO space associated with the context. In dedicated + process mode the entire MMIO space can always be mapped. + + This mmap call must be done after the START_WORK ioctl. + + Care should be taken when accessing MMIO space. Only 32 and 64-bit + accesses are supported by POWER8. Also, the AFU will be designed + with a specific endianness, so all MMIO accesses should consider + endianness (recommend endian(3) variants like: le64toh(), + be64toh() etc). These endian issues equally apply to shared memory + queues the WED may describe. + + +read +---- + + Reads events from the AFU. Blocks if no events are pending + (unless O_NONBLOCK is supplied). Returns -EIO in the case of an + unrecoverable error or if the card is removed. + + read() will always return an integral number of events. + + The buffer passed to read() must be at least 4K bytes. + + The result of the read will be a buffer of one or more events, + each event is of type struct cxl_event, of varying size. + + struct cxl_event { + struct cxl_event_header header; + union { + struct cxl_event_afu_interrupt irq; + struct cxl_event_data_storage fault; + struct cxl_event_afu_error afu_error; + }; + }; + + The struct cxl_event_header is defined as: + + struct cxl_event_header { + __u16 type; + __u16 size; + __u16 process_element; + __u16 reserved1; + }; + + type: + This defines the type of event. The type determines how + the rest of the event is structured. These types are + described below and defined by enum cxl_event_type. + + size: + This is the size of the event in bytes including the + struct cxl_event_header. The start of the next event can + be found at this offset from the start of the current + event. + + process_element: + Context ID of the event. + + reserved field: + For future extensions and padding. + + If the event type is CXL_EVENT_AFU_INTERRUPT then the event + structure is defined as: + + struct cxl_event_afu_interrupt { + __u16 flags; + __u16 irq; /* Raised AFU interrupt number */ + __u32 reserved1; + }; + + flags: + These flags indicate which optional fields are present + in this struct. Currently all fields are mandatory. + + irq: + The IRQ number sent by the AFU. + + reserved field: + For future extensions and padding. + + If the event type is CXL_EVENT_DATA_STORAGE then the event + structure is defined as: + + struct cxl_event_data_storage { + __u16 flags; + __u16 reserved1; + __u32 reserved2; + __u64 addr; + __u64 dsisr; + __u64 reserved3; + }; + + flags: + These flags indicate which optional fields are present in + this struct. Currently all fields are mandatory. + + address: + The address that the AFU unsuccessfully attempted to + access. Valid accesses will be handled transparently by the + kernel but invalid accesses will generate this event. + + dsisr: + This field gives information on the type of fault. It is a + copy of the DSISR from the PSL hardware when the address + fault occurred. The form of the DSISR is as defined in the + CAIA. + + reserved fields: + For future extensions + + If the event type is CXL_EVENT_AFU_ERROR then the event structure + is defined as: + + struct cxl_event_afu_error { + __u16 flags; + __u16 reserved1; + __u32 reserved2; + __u64 error; + }; + + flags: + These flags indicate which optional fields are present in + this struct. Currently all fields are Mandatory. 
+ + error: + Error status from the AFU. Defined by the AFU. + + reserved fields: + For future extensions and padding + +Sysfs Class +=========== + + A cxl sysfs class is added under /sys/class/cxl to facilitate + enumeration and tuning of the accelerators. Its layout is + described in Documentation/ABI/testing/sysfs-class-cxl + +Udev rules +========== + + The following udev rules could be used to create a symlink to the + most logical chardev to use in any programming mode (afuX.Yd for + dedicated, afuX.Ys for afu directed), since the API is virtually + identical for each: + + SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b" + SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \ + KERNEL=="afu[0-9]*.[0-9]*s", SYMLINK="cxl/%b" diff --git a/MAINTAINERS b/MAINTAINERS index 809ecd680d88..facc2198eaba 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2711,6 +2711,18 @@ W: http://www.chelsio.com S: Supported F: drivers/net/ethernet/chelsio/cxgb4vf/ +CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER +M: Ian Munsie +M: Michael Neuling +L: linuxppc-dev@lists.ozlabs.org +S: Supported +F: drivers/misc/cxl/ +F: include/misc/cxl.h +F: include/uapi/misc/cxl.h +F: Documentation/powerpc/cxl.txt +F: Documentation/powerpc/cxl.txt +F: Documentation/ABI/testing/sysfs-class-cxl + STMMAC ETHERNET DRIVER M: Giuseppe Cavallaro L: netdev@vger.kernel.org diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h index c232be6ae21f..cd6d789b73ec 100644 --- a/include/uapi/misc/cxl.h +++ b/include/uapi/misc/cxl.h @@ -13,7 +13,7 @@ #include #include -/* Structs for IOCTLS for userspace to talk to the kernel */ + struct cxl_ioctl_start_work { __u64 flags; __u64 work_element_descriptor; @@ -26,19 +26,20 @@ struct cxl_ioctl_start_work { __u64 reserved5; __u64 reserved6; }; + #define CXL_START_WORK_AMR 0x0000000000000001ULL #define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL #define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\ CXL_START_WORK_NUM_IRQS) -/* IOCTL numbers */ +/* ioctl numbers */ #define CXL_MAGIC 0xCA #define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work) #define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32) -/* Events from read() */ #define CXL_READ_MIN_SIZE 0x1000 /* 4K */ +/* Events from read() */ enum cxl_event_type { CXL_EVENT_RESERVED = 0, CXL_EVENT_AFU_INTERRUPT = 1, -- cgit v1.2.3-59-g8ed1b From 554077c54beafc8d5fe46fa42bf36d9322a51855 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Fri, 3 Oct 2014 09:50:32 -0600 Subject: MAINTAINERS: add Alexandre Courbot for Tegra I'd like to propose Alexandre Courbot as an additional Tegra maintainer. He's been working on a variety of Tegra-related code for a while, and is now officially tasked with working on upstream support. Signed-off-by: Stephen Warren Acked-by: Alexandre Courbot Acked-by: Thierry Reding -- v2: * Use Alexandre's full name. * Use a non-NVIDIA email address to avoid Exchange Server patch corruption. 
Signed-off-by: Arnd Bergmann --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f04c8b672622..d50de1a937b0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9010,6 +9010,7 @@ F: drivers/media/rc/ttusbir.c TEGRA ARCHITECTURE SUPPORT M: Stephen Warren M: Thierry Reding +M: Alexandre Courbot L: linux-tegra@vger.kernel.org Q: http://patchwork.ozlabs.org/project/linux-tegra/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git -- cgit v1.2.3-59-g8ed1b From 356d41422b00fed4bf8622a0721039a18f687fa0 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Fri, 3 Oct 2014 09:50:33 -0600 Subject: MAINTAINERS: condense some Tegra related entries There's little point having specific entries in MAINTAINERS for Tegra drivers that are already covered by the top-level Tegra architecture support entry, and maintained by people listed there. Remove the duplicates. Signed-off-by: Stephen Warren Acked-by: Thierry Reding Signed-off-by: Arnd Bergmann --- MAINTAINERS | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index d50de1a937b0..ed86a568c350 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9017,11 +9017,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git S: Supported N: [^a-z]tegra -TEGRA ASOC DRIVER -M: Stephen Warren -S: Supported -F: sound/soc/tegra/ - TEGRA CLOCK DRIVER M: Peter De Schrijver M: Prashant Gaikwad @@ -9033,11 +9028,6 @@ M: Laxman Dewangan S: Supported F: drivers/dma/tegra20-apb-dma.c -TEGRA GPIO DRIVER -M: Stephen Warren -S: Supported -F: drivers/gpio/gpio-tegra.c - TEGRA I2C DRIVER M: Laxman Dewangan S: Supported @@ -9054,11 +9044,6 @@ M: Laxman Dewangan S: Supported F: drivers/input/keyboard/tegra-kbc.c -TEGRA PINCTRL DRIVER -M: Stephen Warren -S: Supported -F: drivers/pinctrl/pinctrl-tegra* - TEGRA PWM DRIVER M: Thierry Reding S: Supported -- cgit v1.2.3-59-g8ed1b From 5df27823b5552e885e02c1ea0d2f52326d7d710c Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Wed, 8 Oct 2014 20:31:29 +0800 Subject: MAINTAINERS: update Shawn's email address My Freescale email address will be gone shortly. Update my email to be the Linaro one. 
Signed-off-by: Shawn Guo Signed-off-by: Arnd Bergmann --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ed86a568c350..270d4abffb46 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -969,7 +969,7 @@ F: arch/arm/include/asm/hardware/dec21285.h F: arch/arm/mach-footbridge/ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE -M: Shawn Guo +M: Shawn Guo M: Sascha Hauer L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -- cgit v1.2.3-59-g8ed1b From b261aeafe115256c9b4589dd7bd4ca877eb0fa6c Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 1 Oct 2014 14:02:17 +0300 Subject: IB/iser: Bump version, add maintainer Update the driver version and add Sagi Grimberg as maintainer Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- MAINTAINERS | 1 + drivers/infiniband/ulp/iser/iscsi_iser.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 37054306dc9f..1f3c82833ea2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5020,6 +5020,7 @@ F: include/scsi/*iscsi* ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR M: Or Gerlitz +M: Sagi Grimberg M: Roi Dayan L: linux-rdma@vger.kernel.org S: Supported diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 5250a125b79e..cd4174ca9a76 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -69,7 +69,7 @@ #define DRV_NAME "iser" #define PFX DRV_NAME ": " -#define DRV_VER "1.4.1" +#define DRV_VER "1.4.8" #define iser_dbg(fmt, arg...) \ do { \ -- cgit v1.2.3-59-g8ed1b From b61d18904e2a99ed16b6e97d5419f1db19e08bd2 Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Thu, 9 Oct 2014 18:32:02 -0700 Subject: MAINTAINERS: Update APM X-Gene section Updated APM X-Gene ethernet driver maintainers list. Signed-off-by: Iyappan Subramanian Signed-off-by: Keyur Chudgar Signed-off-by: David S. Miller --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9e315a44ae0c..1a85af843326 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -734,7 +734,6 @@ F: net/appletalk/ APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER M: Iyappan Subramanian M: Keyur Chudgar -M: Ravi Patel S: Supported F: drivers/net/ethernet/apm/xgene/ F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt -- cgit v1.2.3-59-g8ed1b From fb9d4959d2fbe564720b9a00ed6dabeca0870811 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 13 Oct 2014 15:51:15 -0700 Subject: MAINTAINERS: assign systemace driver to Xilinx Assign systemace driver to Xilinx Zynq to ensure if there is a change that someone can even test it. 
Signed-off-by: Michal Simek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f413abff3807..7172d12c2776 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1550,6 +1550,7 @@ T: git git://git.xilinx.com/linux-xlnx.git S: Supported F: arch/arm/mach-zynq/ F: drivers/cpuidle/cpuidle-zynq.c +F: drivers/block/xsysace.c N: zynq N: xilinx F: drivers/clocksource/cadence_ttc_timer.c @@ -10315,10 +10316,6 @@ M: John Linn S: Maintained F: drivers/net/ethernet/xilinx/xilinx_axienet* -XILINX SYSTEMACE DRIVER -S: Orphan -F: drivers/block/xsysace.c - XILINX UARTLITE SERIAL DRIVER M: Peter Korsgaard L: linux-serial@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 004bbd3c01d4811d9bd88061d44773943c4df87b Mon Sep 17 00:00:00 2001 From: Christian Kujau Date: Mon, 13 Oct 2014 15:51:17 -0700 Subject: MAINTAINERS: remove non existent files Inspired by some recent cleanups in MAINTAINERS the following files (F:) cannot be found any more in the tree: * arch/arm/mach-s5pv210/mach-aquila.c * arch/arm/mach-s5pv210/mach-goni.c Those two got removed in commit 28c8331d386a ("ARM: S5PV210: Remove support for board files"). Cc: Tomasz Figa Cc: Kyungmin Park * drivers/rtc/rtc-sec.c A MAINTAINERS fix was attempted in November 2012, but dismissed as rtc-sec.c was still being worked on. Alas, it's still not there. "MAINTAINERS: fix drivers/rtc/rtc-sec.c" http://lkml.iu.edu/hypermail/linux/kernel/1211.2/04820.html Cc: Sangbeom Kim Cc: Cesar Eduardo Barros Signed-off-by: Christian Kujau Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 7172d12c2776..b902aace867e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1338,8 +1338,7 @@ ARM/SAMSUNG MOBILE MACHINE SUPPORT M: Kyungmin Park L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: arch/arm/mach-s5pv210/mach-aquila.c -F: arch/arm/mach-s5pv210/mach-goni.c +F: arch/arm/mach-s5pv210/ ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT M: Kyungmin Park @@ -7975,7 +7974,6 @@ S: Supported F: drivers/mfd/sec*.c F: drivers/regulator/s2m*.c F: drivers/regulator/s5m*.c -F: drivers/rtc/rtc-sec.c F: include/linux/mfd/samsung/ SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS -- cgit v1.2.3-59-g8ed1b From 8ada6d2d3063148202caa43b9bf375e646a4efb3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 13 Oct 2014 15:51:19 -0700 Subject: MAINTAINERS: linaro-mm-sig is moderated Previous patch is awaiting moderator approval for posting to this mailing list. 
Signed-off-by: Randy Dunlap Cc: Sumit Semwal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b902aace867e..cead1126c2b1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3048,7 +3048,7 @@ M: Sumit Semwal S: Maintained L: linux-media@vger.kernel.org L: dri-devel@lists.freedesktop.org -L: linaro-mm-sig@lists.linaro.org +L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers) F: drivers/dma-buf/ F: include/linux/dma-buf* F: include/linux/reservation.h -- cgit v1.2.3-59-g8ed1b From 13b122b3e5843f6177d22bd9484ef58b3660e7da Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Mon, 13 Oct 2014 15:51:21 -0700 Subject: MAINTAINERS: add entry for Kernel Selftest Framework Add entry for Kernel Selftest Framework. Individual tests continue to be maintained by the maintainers for those areas. Signed-off-by: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index cead1126c2b1..c08f21d333f9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5300,6 +5300,13 @@ F: include/linux/lockd/ F: include/linux/sunrpc/ F: include/uapi/linux/sunrpc/ +KERNEL SELFTEST FRAMEWORK +M: Shuah Khan +L: linux-api@vger.kernel.org +T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest +S: Maintained +F: tools/testing/selftests + KERNEL VIRTUAL MACHINE (KVM) M: Gleb Natapov M: Paolo Bonzini -- cgit v1.2.3-59-g8ed1b From 5cbac98ad146af2da00bdf004a36b684cb1dc632 Mon Sep 17 00:00:00 2001 From: Josh Wu Date: Mon, 13 Oct 2014 15:51:24 -0700 Subject: MAINTAINERS: add atmel nand driver maintainer entry Add an entry in MAINTAINERS file for ATMEL nand driver. Signed-off-by: Josh Wu Acked-by: Nicolas Ferre Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c08f21d333f9..674ee922b12d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1738,6 +1738,12 @@ M: Nicolas Ferre S: Supported F: drivers/net/ethernet/cadence/ +ATMEL NAND DRIVER +M: Josh Wu +L: linux-mtd@lists.infradead.org +S: Supported +F: drivers/mtd/nand/atmel_nand* + ATMEL SPI DRIVER M: Nicolas Ferre S: Supported -- cgit v1.2.3-59-g8ed1b From b4174867bee83e79dc155479cb1b67c452da6476 Mon Sep 17 00:00:00 2001 From: Michael Opdenacker Date: Mon, 13 Oct 2014 15:51:26 -0700 Subject: MAINTAINERS: orphan m32r Update the maintenance status for m32r - Removing Hirokazu Takata as maintainer (last commit merged: Nov. 2009) - Remove mailing lists that no longer exist, as the ml.linux-m32r.org subdomain no longer exists. 
- Maintenance status moved to "Orphan" Signed-off-by: Michael Opdenacker Acked-by: Hirokazu Takata Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 674ee922b12d..b8a92ad2863e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5759,11 +5759,8 @@ T: git git://github.com/linux-test-project/ltp.git S: Maintained M32R ARCHITECTURE -M: Hirokazu Takata -L: linux-m32r@ml.linux-m32r.org (moderated for non-subscribers) -L: linux-m32r-ja@ml.linux-m32r.org (in Japanese) W: http://www.linux-m32r.org/ -S: Maintained +S: Orphan F: arch/m32r/ M68K ARCHITECTURE -- cgit v1.2.3-59-g8ed1b From af9f1b3c7fa953d3df82a4548d8984de40ac9017 Mon Sep 17 00:00:00 2001 From: Michael Opdenacker Date: Mon, 13 Oct 2014 15:51:28 -0700 Subject: MAINTAINERS: remove Chirag Kantharia, invalid e-mail This removes Chirag Kantharia from the MAINTAINERS file, as his e-mail address is now rejected by the HP mail server. Make the driver "Orphan" until he gets back with a working e-mail address or a new maintainer steps in. Signed-off-by: Michael Opdenacker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b8a92ad2863e..c52367997fb5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4303,9 +4303,8 @@ S: Maintained F: drivers/media/dvb-frontends/hd29l2* HEWLETT-PACKARD SMART2 RAID DRIVER -M: Chirag Kantharia L: iss_storagedev@hp.com -S: Maintained +S: Orphan F: Documentation/blockdev/cpqarray.txt F: drivers/block/cpqarray.* -- cgit v1.2.3-59-g8ed1b From c53fed07a03d8b2a2e3bdaba87768211fa55806c Mon Sep 17 00:00:00 2001 From: Vince Bridgers Date: Thu, 9 Oct 2014 10:08:42 -0500 Subject: MAINTAINERS: Update contact information for Vince Bridgers Signed-off-by: Vince Bridgers Signed-off-by: David S. 
Miller --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1a85af843326..6dc0840c1aa2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -564,7 +564,7 @@ L: linux-alpha@vger.kernel.org F: arch/alpha/ ALTERA TRIPLE SPEED ETHERNET DRIVER -M: Vince Bridgers +M: Vince Bridgers L: netdev@vger.kernel.org L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) S: Maintained -- cgit v1.2.3-59-g8ed1b From 97215800e4b74212e51b1f373877f91f97579411 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 13 Oct 2014 14:16:28 -0400 Subject: MAINTAINERS: Update Santosh Shilimkar's email id Signed-off-by: Santosh Shilimkar Signed-off-by: Olof Johansson --- MAINTAINERS | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b0f17d59078e..c29c24b3396c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1088,33 +1088,33 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE -M: Santosh Shilimkar +M: Santosh Shilimkar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-keystone/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK -M: Santosh Shilimkar +M: Santosh Shilimkar L: linux-kernel@vger.kernel.org S: Maintained F: drivers/clk/keystone/ ARM/TEXAS INSTRUMENT KEYSTONE ClOCKSOURCE -M: Santosh Shilimkar +M: Santosh Shilimkar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-kernel@vger.kernel.org S: Maintained F: drivers/clocksource/timer-keystone.c ARM/TEXAS INSTRUMENT KEYSTONE RESET DRIVER -M: Santosh Shilimkar +M: Santosh Shilimkar L: linux-kernel@vger.kernel.org S: Maintained F: drivers/power/reset/keystone-reset.c ARM/TEXAS INSTRUMENT AEMIF/EMIF DRIVERS -M: Santosh Shilimkar +M: Santosh Shilimkar L: linux-kernel@vger.kernel.org S: Maintained F: drivers/memory/*emif* @@ -6686,7 +6686,7 @@ F: arch/arm/*omap*/usb* OMAP GPIO DRIVER M: Javier Martinez Canillas -M: Santosh Shilimkar +M: Santosh Shilimkar M: Kevin Hilman L: linux-omap@vger.kernel.org S: Maintained @@ -9273,7 +9273,7 @@ F: drivers/mmc/host/tifm_sd.c F: include/linux/tifm.h TI KEYSTONE MULTICORE NAVIGATOR DRIVERS -M: Santosh Shilimkar +M: Santosh Shilimkar L: linux-kernel@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -- cgit v1.2.3-59-g8ed1b From ad3118b9861379e3a77883613369cb967ffac26a Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Fri, 17 Oct 2014 08:59:26 -0400 Subject: MAINTAINERS: Become the docs maintainer It seems it's my turn to be the documentation maintainer for a bit. My plan is to work to ensure that docs patches don't fall through the cracks; I assume most changes will continue to flow through subsystem-specific trees. 
Acked-by: Jiri Kosina Signed-off-by: Jonathan Corbet Signed-off-by: Linus Torvalds --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index d61b727fbfa8..90baf1bef4ca 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3085,14 +3085,13 @@ S: Supported F: drivers/acpi/dock.c DOCUMENTATION -M: Jiri Kosina +M: Jonathan Corbet L: linux-doc@vger.kernel.org S: Maintained F: Documentation/ X: Documentation/ABI/ X: Documentation/devicetree/ X: Documentation/[a-z][a-z]_[A-Z][A-Z]/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/doc.git DOUBLETALK DRIVER M: "James R. Van Zandt" -- cgit v1.2.3-59-g8ed1b From fadc07522c3ce65c4d1c69a9284605a07aea1be8 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 19 Oct 2014 19:19:57 +0300 Subject: MAINTAINERS: Change Boaz Harrosh's email I have moved on, and do no longer have Panasas email access. Update to an email that can reach me. So change bharrosh@panasas.com => ooo@electrozaur.com Explain of email address: * electrozaur.com is a domain owned by me. * ooo - Stands for Open Osd . Org Another email alias that can be used is: openosd@gmail.com CC: Greg KH Signed-off-by: Boaz Harrosh --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f10ed3914ea8..fc2c9a838d92 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6725,7 +6725,7 @@ S: Orphan F: drivers/net/wireless/orinoco/ OSD LIBRARY and FILESYSTEM -M: Boaz Harrosh +M: Boaz Harrosh M: Benny Halevy L: osd-dev@open-osd.org W: http://open-osd.org -- cgit v1.2.3-59-g8ed1b From 9209bec4f8112928c796f70400a23d1f44469631 Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Thu, 16 Oct 2014 21:57:16 -0600 Subject: MAINTAINERS: corrected bcm2835 search Corrected bcm2835 maintainer info by using N: to specify any files with bcm2835 in are directed to the proper maintainer. Also corrected minor mispelling of ARCHITECTURE in 2 comment locations. Signed-off-by: Scott Branden Signed-off-by: Stephen Warren Signed-off-by: Olof Johansson --- MAINTAINERS | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c29c24b3396c..fc1ddec408fd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2062,17 +2062,14 @@ F: arch/arm/configs/bcm_defconfig F: drivers/mmc/host/sdhci-bcm-kona.c F: drivers/clocksource/bcm_kona_timer.c -BROADCOM BCM2835 ARM ARCHICTURE +BROADCOM BCM2835 ARM ARCHITECTURE M: Stephen Warren L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-rpi.git S: Maintained -F: arch/arm/mach-bcm/board_bcm2835.c -F: arch/arm/boot/dts/bcm2835* -F: arch/arm/configs/bcm2835_defconfig -F: drivers/*/*bcm2835* +N: bcm2835 -BROADCOM BCM5301X ARM ARCHICTURE +BROADCOM BCM5301X ARM ARCHITECTURE M: Hauke Mehrtens L: linux-arm-kernel@lists.infradead.org S: Maintained -- cgit v1.2.3-59-g8ed1b From d09e9b160fc18116942101743693f5535bc5136a Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Wed, 15 Oct 2014 15:53:27 -0700 Subject: staging: bcm: remove driver The Beceem WiMAX driver was barely function in its current state and was non-functional on 64 bit systems. Based on repeated statements from Greg KH that he wanted the driver removed, I am removing the driver. 
CC: Matthias Beyer CC: Kevin McKinney Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 7 - drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - drivers/staging/bcm/Adapter.h | 474 --- drivers/staging/bcm/Bcmchar.c | 2652 -------------- drivers/staging/bcm/Bcmnet.c | 240 -- drivers/staging/bcm/CmHost.c | 2254 ------------ drivers/staging/bcm/CmHost.h | 62 - drivers/staging/bcm/DDRInit.c | 1355 ------- drivers/staging/bcm/DDRInit.h | 9 - drivers/staging/bcm/Debug.h | 242 -- drivers/staging/bcm/HandleControlPacket.c | 241 -- drivers/staging/bcm/HostMIBSInterface.h | 192 - drivers/staging/bcm/IPv6Protocol.c | 476 --- drivers/staging/bcm/IPv6ProtocolHdr.h | 85 - drivers/staging/bcm/InterfaceAdapter.h | 79 - drivers/staging/bcm/InterfaceDld.c | 317 -- drivers/staging/bcm/InterfaceIdleMode.c | 274 -- drivers/staging/bcm/InterfaceIdleMode.h | 15 - drivers/staging/bcm/InterfaceInit.c | 729 ---- drivers/staging/bcm/InterfaceInit.h | 26 - drivers/staging/bcm/InterfaceIsr.c | 190 - drivers/staging/bcm/InterfaceIsr.h | 15 - drivers/staging/bcm/InterfaceMacros.h | 18 - drivers/staging/bcm/InterfaceMisc.c | 246 -- drivers/staging/bcm/InterfaceMisc.h | 42 - drivers/staging/bcm/InterfaceRx.c | 289 -- drivers/staging/bcm/InterfaceRx.h | 7 - drivers/staging/bcm/InterfaceTx.c | 213 -- drivers/staging/bcm/InterfaceTx.h | 7 - drivers/staging/bcm/Ioctl.h | 226 -- drivers/staging/bcm/Kconfig | 6 - drivers/staging/bcm/LeakyBucket.c | 363 -- drivers/staging/bcm/Macros.h | 352 -- drivers/staging/bcm/Makefile | 12 - drivers/staging/bcm/Misc.c | 1587 -------- drivers/staging/bcm/PHSDefines.h | 94 - drivers/staging/bcm/PHSModule.c | 1699 --------- drivers/staging/bcm/PHSModule.h | 59 - drivers/staging/bcm/Protocol.h | 128 - drivers/staging/bcm/Prototypes.h | 217 -- drivers/staging/bcm/Qos.c | 1199 ------ drivers/staging/bcm/Queue.h | 29 - drivers/staging/bcm/TODO | 26 - drivers/staging/bcm/Transmit.c | 271 -- drivers/staging/bcm/Typedefs.h | 47 - drivers/staging/bcm/cntrl_SignalingInterface.h | 311 -- drivers/staging/bcm/headers.h | 78 - drivers/staging/bcm/hostmibs.c | 164 - drivers/staging/bcm/led_control.c | 951 ----- drivers/staging/bcm/led_control.h | 84 - drivers/staging/bcm/nvm.c | 4661 ------------------------ drivers/staging/bcm/nvm.h | 286 -- drivers/staging/bcm/sort.c | 52 - drivers/staging/bcm/target_params.h | 57 - drivers/staging/bcm/vendorspecificextn.c | 145 - drivers/staging/bcm/vendorspecificextn.h | 18 - 57 files changed, 23881 deletions(-) delete mode 100644 drivers/staging/bcm/Adapter.h delete mode 100644 drivers/staging/bcm/Bcmchar.c delete mode 100644 drivers/staging/bcm/Bcmnet.c delete mode 100644 drivers/staging/bcm/CmHost.c delete mode 100644 drivers/staging/bcm/CmHost.h delete mode 100644 drivers/staging/bcm/DDRInit.c delete mode 100644 drivers/staging/bcm/DDRInit.h delete mode 100644 drivers/staging/bcm/Debug.h delete mode 100644 drivers/staging/bcm/HandleControlPacket.c delete mode 100644 drivers/staging/bcm/HostMIBSInterface.h delete mode 100644 drivers/staging/bcm/IPv6Protocol.c delete mode 100644 drivers/staging/bcm/IPv6ProtocolHdr.h delete mode 100644 drivers/staging/bcm/InterfaceAdapter.h delete mode 100644 drivers/staging/bcm/InterfaceDld.c delete mode 100644 drivers/staging/bcm/InterfaceIdleMode.c delete mode 100644 drivers/staging/bcm/InterfaceIdleMode.h delete mode 100644 drivers/staging/bcm/InterfaceInit.c delete mode 100644 drivers/staging/bcm/InterfaceInit.h delete mode 100644 drivers/staging/bcm/InterfaceIsr.c delete mode 100644 
drivers/staging/bcm/InterfaceIsr.h delete mode 100644 drivers/staging/bcm/InterfaceMacros.h delete mode 100644 drivers/staging/bcm/InterfaceMisc.c delete mode 100644 drivers/staging/bcm/InterfaceMisc.h delete mode 100644 drivers/staging/bcm/InterfaceRx.c delete mode 100644 drivers/staging/bcm/InterfaceRx.h delete mode 100644 drivers/staging/bcm/InterfaceTx.c delete mode 100644 drivers/staging/bcm/InterfaceTx.h delete mode 100644 drivers/staging/bcm/Ioctl.h delete mode 100644 drivers/staging/bcm/Kconfig delete mode 100644 drivers/staging/bcm/LeakyBucket.c delete mode 100644 drivers/staging/bcm/Macros.h delete mode 100644 drivers/staging/bcm/Makefile delete mode 100644 drivers/staging/bcm/Misc.c delete mode 100644 drivers/staging/bcm/PHSDefines.h delete mode 100644 drivers/staging/bcm/PHSModule.c delete mode 100644 drivers/staging/bcm/PHSModule.h delete mode 100644 drivers/staging/bcm/Protocol.h delete mode 100644 drivers/staging/bcm/Prototypes.h delete mode 100644 drivers/staging/bcm/Qos.c delete mode 100644 drivers/staging/bcm/Queue.h delete mode 100644 drivers/staging/bcm/TODO delete mode 100644 drivers/staging/bcm/Transmit.c delete mode 100644 drivers/staging/bcm/Typedefs.h delete mode 100644 drivers/staging/bcm/cntrl_SignalingInterface.h delete mode 100644 drivers/staging/bcm/headers.h delete mode 100644 drivers/staging/bcm/hostmibs.c delete mode 100644 drivers/staging/bcm/led_control.c delete mode 100644 drivers/staging/bcm/led_control.h delete mode 100644 drivers/staging/bcm/nvm.c delete mode 100644 drivers/staging/bcm/nvm.h delete mode 100644 drivers/staging/bcm/sort.c delete mode 100644 drivers/staging/bcm/target_params.h delete mode 100644 drivers/staging/bcm/vendorspecificextn.c delete mode 100644 drivers/staging/bcm/vendorspecificextn.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..d0dd1f35b81d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1883,13 +1883,6 @@ W: http://bcache.evilpiepirate.org S: Maintained: F: drivers/md/bcache/ -BECEEM BCS200/BCS220-3/BCSM250 WIMAX SUPPORT -M: Kevin McKinney -M: Matthias Beyer -L: devel@driverdev.osuosl.org -S: Maintained -F: drivers/staging/bcm* - BEFS FILE SYSTEM S: Orphan F: Documentation/filesystems/befs.txt diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 80ad2078eea2..3b01fef54a35 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -62,8 +62,6 @@ source "drivers/staging/xgifb/Kconfig" source "drivers/staging/emxx_udc/Kconfig" -source "drivers/staging/bcm/Kconfig" - source "drivers/staging/ft1000/Kconfig" source "drivers/staging/speakup/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 17aa2231e208..26ecead09901 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -25,7 +25,6 @@ obj-$(CONFIG_VME_BUS) += vme/ obj-$(CONFIG_IIO) += iio/ obj-$(CONFIG_FB_XGI) += xgifb/ obj-$(CONFIG_USB_EMXX) += emxx_udc/ -obj-$(CONFIG_BCM_WIMAX) += bcm/ obj-$(CONFIG_FT1000) += ft1000/ obj-$(CONFIG_SPEAKUP) += speakup/ obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/ diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h deleted file mode 100644 index 940c852e17b7..000000000000 --- a/drivers/staging/bcm/Adapter.h +++ /dev/null @@ -1,474 +0,0 @@ -/*********************************** -* Adapter.h -************************************/ -#ifndef __ADAPTER_H__ -#define __ADAPTER_H__ - -#define MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES 256 -#include "Debug.h" - -struct bcm_leader { - USHORT Vcid; - USHORT PLength; - 
UCHAR Status; - UCHAR Unused[3]; -} __packed; - -struct bcm_packettosend { - struct bcm_leader Leader; - UCHAR ucPayload; -} __packed; - -struct bcm_control_packet { - PVOID ControlBuff; - UINT ControlBuffLen; - struct bcm_control_packet *next; -} __packed; - -struct bcm_link_request { - struct bcm_leader Leader; - UCHAR szData[4]; -} __packed; - -#define MAX_IP_RANGE_LENGTH 4 -#define MAX_PORT_RANGE 4 -#define MAX_PROTOCOL_LENGTH 32 -#define IPV6_ADDRESS_SIZEINBYTES 0x10 - -union u_ip_address { - struct { - /* Source Ip Address Range */ - ULONG ulIpv4Addr[MAX_IP_RANGE_LENGTH]; - /* Source Ip Mask Address Range */ - ULONG ulIpv4Mask[MAX_IP_RANGE_LENGTH]; - }; - struct { - /* Source Ip Address Range */ - ULONG ulIpv6Addr[MAX_IP_RANGE_LENGTH * 4]; - /* Source Ip Mask Address Range */ - ULONG ulIpv6Mask[MAX_IP_RANGE_LENGTH * 4]; - }; - struct { - UCHAR ucIpv4Address[MAX_IP_RANGE_LENGTH * IP_LENGTH_OF_ADDRESS]; - UCHAR ucIpv4Mask[MAX_IP_RANGE_LENGTH * IP_LENGTH_OF_ADDRESS]; - }; - struct { - UCHAR ucIpv6Address[MAX_IP_RANGE_LENGTH * IPV6_ADDRESS_SIZEINBYTES]; - UCHAR ucIpv6Mask[MAX_IP_RANGE_LENGTH * IPV6_ADDRESS_SIZEINBYTES]; - }; -}; - -struct bcm_hdr_suppression_contextinfo { - /* Intermediate buffer to accumulate pkt Header for PHS */ - UCHAR ucaHdrSuppressionInBuf[MAX_PHS_LENGTHS]; - /* Intermediate buffer containing pkt Header after PHS */ - UCHAR ucaHdrSuppressionOutBuf[MAX_PHS_LENGTHS + PHSI_LEN]; -}; - -struct bcm_classifier_rule { - ULONG ulSFID; - UCHAR ucReserved[2]; - B_UINT16 uiClassifierRuleIndex; - bool bUsed; - USHORT usVCID_Value; - /* This field detemines the Classifier Priority */ - B_UINT8 u8ClassifierRulePriority; - union u_ip_address stSrcIpAddress; - UCHAR ucIPSourceAddressLength; /* Ip Source Address Length */ - - union u_ip_address stDestIpAddress; - /* Ip Destination Address Length */ - UCHAR ucIPDestinationAddressLength; - UCHAR ucIPTypeOfServiceLength; /* Type of service Length */ - UCHAR ucTosLow; /* Tos Low */ - UCHAR ucTosHigh; /* Tos High */ - UCHAR ucTosMask; /* Tos Mask */ - - UCHAR ucProtocolLength; /* protocol Length */ - UCHAR ucProtocol[MAX_PROTOCOL_LENGTH]; /* protocol Length */ - USHORT usSrcPortRangeLo[MAX_PORT_RANGE]; - USHORT usSrcPortRangeHi[MAX_PORT_RANGE]; - UCHAR ucSrcPortRangeLength; - - USHORT usDestPortRangeLo[MAX_PORT_RANGE]; - USHORT usDestPortRangeHi[MAX_PORT_RANGE]; - UCHAR ucDestPortRangeLength; - - bool bProtocolValid; - bool bTOSValid; - bool bDestIpValid; - bool bSrcIpValid; - - /* For IPv6 Addressing */ - UCHAR ucDirection; - bool bIpv6Protocol; - UINT32 u32PHSRuleID; - struct bcm_phs_rule sPhsRule; - UCHAR u8AssociatedPHSI; - - /* Classification fields for ETH CS */ - UCHAR ucEthCSSrcMACLen; - UCHAR au8EThCSSrcMAC[MAC_ADDRESS_SIZE]; - UCHAR au8EThCSSrcMACMask[MAC_ADDRESS_SIZE]; - UCHAR ucEthCSDestMACLen; - UCHAR au8EThCSDestMAC[MAC_ADDRESS_SIZE]; - UCHAR au8EThCSDestMACMask[MAC_ADDRESS_SIZE]; - UCHAR ucEtherTypeLen; - UCHAR au8EthCSEtherType[NUM_ETHERTYPE_BYTES]; - UCHAR usUserPriority[2]; - USHORT usVLANID; - USHORT usValidityBitMap; -}; - -struct bcm_fragmented_packet_info { - bool bUsed; - ULONG ulSrcIpAddress; - USHORT usIpIdentification; - struct bcm_classifier_rule *pstMatchedClassifierEntry; - bool bOutOfOrderFragment; -}; - -struct bcm_packet_info { - /* classification extension Rule */ - ULONG ulSFID; - USHORT usVCID_Value; - UINT uiThreshold; - /* This field determines the priority of the SF Queues */ - B_UINT8 u8TrafficPriority; - - bool bValid; - bool bActive; - bool bActivateRequestSent; - - B_UINT8 u8QueueType; /* BE or 
rtPS */ - - /* maximum size of the bucket for the queue */ - UINT uiMaxBucketSize; - UINT uiCurrentQueueDepthOnTarget; - UINT uiCurrentBytesOnHost; - UINT uiCurrentPacketsOnHost; - UINT uiDroppedCountBytes; - UINT uiDroppedCountPackets; - UINT uiSentBytes; - UINT uiSentPackets; - UINT uiCurrentDrainRate; - UINT uiThisPeriodSentBytes; - LARGE_INTEGER liDrainCalculated; - UINT uiCurrentTokenCount; - LARGE_INTEGER liLastUpdateTokenAt; - UINT uiMaxAllowedRate; - UINT NumOfPacketsSent; - UCHAR ucDirection; - USHORT usCID; - struct bcm_mibs_parameters stMibsExtServiceFlowTable; - UINT uiCurrentRxRate; - UINT uiThisPeriodRxBytes; - UINT uiTotalRxBytes; - UINT uiTotalTxBytes; - UINT uiPendedLast; - UCHAR ucIpVersion; - - union { - struct { - struct sk_buff *FirstTxQueue; - struct sk_buff *LastTxQueue; - }; - struct { - struct sk_buff *ControlHead; - struct sk_buff *ControlTail; - }; - }; - - bool bProtocolValid; - bool bTOSValid; - bool bDestIpValid; - bool bSrcIpValid; - - bool bActiveSet; - bool bAdmittedSet; - bool bAuthorizedSet; - bool bClassifierPriority; - UCHAR ucServiceClassName[MAX_CLASS_NAME_LENGTH]; - bool bHeaderSuppressionEnabled; - spinlock_t SFQueueLock; - void *pstSFIndication; - struct timeval stLastUpdateTokenAt; - atomic_t uiPerSFTxResourceCount; - UINT uiMaxLatency; - UCHAR bIPCSSupport; - UCHAR bEthCSSupport; -}; - -struct bcm_tarang_data { - struct bcm_tarang_data *next; - struct bcm_mini_adapter *Adapter; - struct sk_buff *RxAppControlHead; - struct sk_buff *RxAppControlTail; - int AppCtrlQueueLen; - bool MacTracingEnabled; - bool bApplicationToExit; - struct bcm_mibs_dropped_cntrl_msg stDroppedAppCntrlMsgs; - ULONG RxCntrlMsgBitMask; -}; - -struct bcm_targetdsx_buffer { - ULONG ulTargetDsxBuffer; - B_UINT16 tid; - bool valid; -}; - -typedef int (*FP_FLASH_WRITE)(struct bcm_mini_adapter *, UINT, PVOID); - -typedef int (*FP_FLASH_WRITE_STATUS)(struct bcm_mini_adapter *, UINT, PVOID); - -/* - * Driver adapter data structure - */ -struct bcm_mini_adapter { - struct bcm_mini_adapter *next; - struct net_device *dev; - u32 msg_enable; - CHAR *caDsxReqResp; - atomic_t ApplicationRunning; - bool AppCtrlQueueOverFlow; - atomic_t CurrentApplicationCount; - atomic_t RegisteredApplicationCount; - bool LinkUpStatus; - bool TimerActive; - u32 StatisticsPointer; - struct sk_buff *RxControlHead; - struct sk_buff *RxControlTail; - struct semaphore RxAppControlQueuelock; - struct semaphore fw_download_sema; - struct bcm_tarang_data *pTarangs; - spinlock_t control_queue_lock; - wait_queue_head_t process_read_wait_queue; - - /* the pointer to the first packet we have queued in send - * deserialized miniport support variables - */ - atomic_t TotalPacketCount; - atomic_t TxPktAvail; - - /* this to keep track of the Tx and Rx MailBox Registers. 
*/ - atomic_t CurrNumFreeTxDesc; - /* to keep track the no of byte received */ - USHORT PrevNumRecvDescs; - USHORT CurrNumRecvDescs; - UINT u32TotalDSD; - struct bcm_packet_info PackInfo[NO_OF_QUEUES]; - struct bcm_classifier_rule astClassifierTable[MAX_CLASSIFIERS]; - bool TransferMode; - - /*************** qos ******************/ - bool bETHCSEnabled; - ULONG BEBucketSize; - ULONG rtPSBucketSize; - UCHAR LinkStatus; - bool AutoLinkUp; - bool AutoSyncup; - - int major; - int minor; - wait_queue_head_t tx_packet_wait_queue; - wait_queue_head_t process_rx_cntrlpkt; - atomic_t process_waiting; - bool fw_download_done; - - char *txctlpacket[MAX_CNTRL_PKTS]; - atomic_t cntrlpktCnt; - atomic_t index_app_read_cntrlpkt; - atomic_t index_wr_txcntrlpkt; - atomic_t index_rd_txcntrlpkt; - UINT index_datpkt; - struct semaphore rdmwrmsync; - - struct bcm_targetdsx_buffer astTargetDsxBuffer[MAX_TARGET_DSX_BUFFERS]; - ULONG ulFreeTargetBufferCnt; - ULONG ulCurrentTargetBuffer; - ULONG ulTotalTargetBuffersAvailable; - unsigned long chip_id; - wait_queue_head_t lowpower_mode_wait_queue; - bool bFlashBoot; - bool bBinDownloaded; - bool bCfgDownloaded; - bool bSyncUpRequestSent; - USHORT usBestEffortQueueIndex; - wait_queue_head_t ioctl_fw_dnld_wait_queue; - bool waiting_to_fw_download_done; - pid_t fw_download_process_pid; - struct bcm_target_params *pstargetparams; - bool device_removed; - bool DeviceAccess; - bool bIsAutoCorrectEnabled; - bool bDDRInitDone; - int DDRSetting; - ULONG ulPowerSaveMode; - spinlock_t txtransmitlock; - B_UINT8 txtransmit_running; - /* Thread for control packet handling */ - struct task_struct *control_packet_handler; - /* thread for transmitting packets. */ - struct task_struct *transmit_packet_thread; - - /* LED Related Structures */ - struct bcm_led_info LEDInfo; - - /* Driver State for LED Blinking */ - enum bcm_led_events DriverState; - /* Interface Specific */ - PVOID pvInterfaceAdapter; - int (*bcm_file_download)(PVOID, - struct file *, - unsigned int); - int (*bcm_file_readback_from_chip)(PVOID, - struct file *, - unsigned int); - int (*interface_rdm)(PVOID, - UINT, - PVOID, - int); - int (*interface_wrm)(PVOID, - UINT, - PVOID, - int); - int (*interface_transmit)(PVOID, PVOID , UINT); - bool IdleMode; - bool bDregRequestSentInIdleMode; - bool bTriedToWakeUpFromlowPowerMode; - bool bShutStatus; - bool bWakeUpDevice; - unsigned int usIdleModePattern; - /* BOOLEAN bTriedToWakeUpFromShutdown; */ - bool bLinkDownRequested; - int downloadDDR; - struct bcm_phs_extension stBCMPhsContext; - struct bcm_hdr_suppression_contextinfo stPhsTxContextInfo; - uint8_t ucaPHSPktRestoreBuf[2048]; - uint8_t bPHSEnabled; - bool AutoFirmDld; - bool bMipsConfig; - bool bDPLLConfig; - UINT32 aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES]; - UINT32 aRxPktSizeHist[MIBS_MAX_HIST_ENTRIES]; - struct bcm_fragmented_packet_info - astFragmentedPktClassifierTable[MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES]; - atomic_t uiMBupdate; - UINT32 PmuMode; - enum bcm_nvm_type eNVMType; - UINT uiSectorSize; - UINT uiSectorSizeInCFG; - bool bSectorSizeOverride; - bool bStatusWrite; - UINT uiNVMDSDSize; - UINT uiVendorExtnFlag; - /* it will always represent chosen DSD at any point of time. - * Generally it is Active DSD but in case of NVM RD/WR it - * might be different. 
- */ - UINT ulFlashCalStart; - ULONG ulFlashControlSectionStart; - ULONG ulFlashWriteSize; - ULONG ulFlashID; - FP_FLASH_WRITE fpFlashWrite; - FP_FLASH_WRITE_STATUS fpFlashWriteWithStatusCheck; - - struct semaphore NVMRdmWrmLock; - struct device *pstCreatedClassDevice; - - /* BOOLEAN InterfaceUpStatus; */ - struct bcm_flash2x_cs_info *psFlash2xCSInfo; - struct bcm_flash_cs_info *psFlashCSInfo; - struct bcm_flash2x_vendor_info *psFlash2xVendorInfo; - UINT uiFlashBaseAdd; /* Flash start address */ - /* Active ISO offset chosen before f/w download */ - UINT uiActiveISOOffset; - enum bcm_flash2x_section_val eActiveISO; /* Active ISO section val */ - /* Active DSD val chosen before f/w download */ - enum bcm_flash2x_section_val eActiveDSD; - /* For accessing Active DSD chosen before f/w download */ - UINT uiActiveDSDOffsetAtFwDld; - UINT uiFlashLayoutMajorVersion; - UINT uiFlashLayoutMinorVersion; - bool bAllDSDWriteAllow; - bool bSigCorrupted; - /* this should be set who so ever want to change the Headers. - * after Write it should be reset immediately. - */ - bool bHeaderChangeAllowed; - int SelectedChip; - bool bEndPointHalted; - /* while bFlashRawRead will be true, Driver - * ignore map lay out and consider flash as of without any map. - */ - bool bFlashRawRead; - bool bPreparingForLowPowerMode; - bool bDoSuspend; - UINT syscfgBefFwDld; - bool StopAllXaction; - /* Used to Support extended CAPI requirements from */ - UINT32 liTimeSinceLastNetEntry; - struct semaphore LowPowerModeSync; - ULONG liDrainCalculated; - UINT gpioBitMap; - struct bcm_debug_state stDebugState; -}; - -#define GET_BCM_ADAPTER(net_dev) netdev_priv(net_dev) - -struct bcm_eth_header { - UCHAR au8DestinationAddress[6]; - UCHAR au8SourceAddress[6]; - USHORT u16Etype; -} __packed; - -struct bcm_firmware_info { - void __user *pvMappedFirmwareAddress; - ULONG u32FirmwareLength; - ULONG u32StartingAddress; -} __packed; - -/* holds the value of net_device structure.. 
*/ -extern struct net_device *gblpnetdev; - -struct bcm_ddr_setting { - UINT ulRegAddress; - UINT ulRegValue; -}; -int InitAdapter(struct bcm_mini_adapter *psAdapter); - -/* ===================================================================== - * Beceem vendor request codes for EP0 - * ===================================================================== - */ - -#define BCM_REQUEST_READ 0x2 -#define BCM_REQUEST_WRITE 0x1 -#define EP2_MPS_REG 0x0F0110A0 -#define EP2_MPS 0x40 - -#define EP2_CFG_REG 0x0F0110A8 -#define EP2_CFG_INT 0x27 -#define EP2_CFG_BULK 0x25 - -#define EP4_MPS_REG 0x0F0110F0 -#define EP4_MPS 0x8C - -#define EP4_CFG_REG 0x0F0110F8 - -#define ISO_MPS_REG 0x0F0110C8 -#define ISO_MPS 0x00000000 - -#define EP1 0 -#define EP2 1 -#define EP3 2 -#define EP4 3 -#define EP5 4 -#define EP6 5 - -enum bcm_einterface_setting { - DEFAULT_SETTING_0 = 0, - ALTERNATE_SETTING_1 = 1, -}; - -#endif /* __ADAPTER_H__ */ diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c deleted file mode 100644 index 88ce2da531c6..000000000000 --- a/drivers/staging/bcm/Bcmchar.c +++ /dev/null @@ -1,2652 +0,0 @@ -#include - -#include "headers.h" - -static int bcm_handle_nvm_read_cmd(struct bcm_mini_adapter *ad, - PUCHAR read_data, - struct bcm_nvm_readwrite *nvm_rw) -{ - INT status = STATUS_FAILURE; - - down(&ad->NVMRdmWrmLock); - - if ((ad->IdleMode == TRUE) || (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, - DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Device is in Idle/Shutdown Mode\n"); - up(&ad->NVMRdmWrmLock); - kfree(read_data); - return -EACCES; - } - - status = BeceemNVMRead(ad, (PUINT)read_data, - nvm_rw->uiOffset, - nvm_rw->uiNumBytes); - up(&ad->NVMRdmWrmLock); - - if (status != STATUS_SUCCESS) { - kfree(read_data); - return status; - } - - if (copy_to_user(nvm_rw->pBuffer, read_data, nvm_rw->uiNumBytes)) { - kfree(read_data); - return -EFAULT; - } - - return STATUS_SUCCESS; -} - -static int handle_flash2x_adapter(struct bcm_mini_adapter *ad, - PUCHAR read_data, - struct bcm_nvm_readwrite *nvm_rw) -{ - /* - * New Requirement:- - * DSD section updation will be allowed in two case:- - * 1. if DSD sig is present in DSD header means dongle - * is ok and updation is fruitfull - * 2. if point 1 failes then user buff should have - * DSD sig. this point ensures that if dongle is - * corrupted then user space program first modify - * the DSD header with valid DSD sig so that this - * as well as further write may be worthwhile. - * - * This restriction has been put assuming that - * if DSD sig is corrupted, DSD data won't be - * considered valid. 
- */ - INT status; - ULONG dsd_magic_num_in_usr_buff = 0; - - status = BcmFlash2xCorruptSig(ad, ad->eActiveDSD); - if (status == STATUS_SUCCESS) - return STATUS_SUCCESS; - - if (((nvm_rw->uiOffset + nvm_rw->uiNumBytes) != - ad->uiNVMDSDSize) || - (nvm_rw->uiNumBytes < SIGNATURE_SIZE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "DSD Sig is present neither in Flash nor User provided Input.."); - up(&ad->NVMRdmWrmLock); - kfree(read_data); - return status; - } - - dsd_magic_num_in_usr_buff = - ntohl(*(PUINT)(read_data + nvm_rw->uiNumBytes - - SIGNATURE_SIZE)); - if (dsd_magic_num_in_usr_buff != DSD_IMAGE_MAGIC_NUMBER) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "DSD Sig is present neither in Flash nor User provided Input.."); - up(&ad->NVMRdmWrmLock); - kfree(read_data); - return status; - } - - return STATUS_SUCCESS; -} - -/*************************************************************** -* Function - bcm_char_open() -* -* Description - This is the "open" entry point for the character -* driver. -* -* Parameters - inode: Pointer to the Inode structure of char device -* filp : File pointer of the char device -* -* Returns - Zero(Success) -****************************************************************/ - -static int bcm_char_open(struct inode *inode, struct file *filp) -{ - struct bcm_mini_adapter *ad = NULL; - struct bcm_tarang_data *tarang = NULL; - - ad = GET_BCM_ADAPTER(gblpnetdev); - tarang = kzalloc(sizeof(struct bcm_tarang_data), GFP_KERNEL); - if (!tarang) - return -ENOMEM; - - tarang->Adapter = ad; - tarang->RxCntrlMsgBitMask = 0xFFFFFFFF & ~(1 << 0xB); - - down(&ad->RxAppControlQueuelock); - tarang->next = ad->pTarangs; - ad->pTarangs = tarang; - up(&ad->RxAppControlQueuelock); - - /* Store the Adapter structure */ - filp->private_data = tarang; - - /* Start Queuing the control response Packets */ - atomic_inc(&ad->ApplicationRunning); - - nonseekable_open(inode, filp); - return 0; -} - -static int bcm_char_release(struct inode *inode, struct file *filp) -{ - struct bcm_tarang_data *tarang, *tmp, *ptmp; - struct bcm_mini_adapter *ad = NULL; - struct sk_buff *pkt, *npkt; - - tarang = (struct bcm_tarang_data *)filp->private_data; - - if (tarang == NULL) - return 0; - - ad = tarang->Adapter; - - down(&ad->RxAppControlQueuelock); - - tmp = ad->pTarangs; - for (ptmp = NULL; tmp; ptmp = tmp, tmp = tmp->next) { - if (tmp == tarang) - break; - } - - if (tmp) { - if (!ptmp) - ad->pTarangs = tmp->next; - else - ptmp->next = tmp->next; - } else { - up(&ad->RxAppControlQueuelock); - return 0; - } - - pkt = tarang->RxAppControlHead; - while (pkt) { - npkt = pkt->next; - kfree_skb(pkt); - pkt = npkt; - } - - up(&ad->RxAppControlQueuelock); - - /* Stop Queuing the control response Packets */ - atomic_dec(&ad->ApplicationRunning); - - kfree(tarang); - - /* remove this filp from the asynchronously notified filp's */ - filp->private_data = NULL; - return 0; -} - -static ssize_t bcm_char_read(struct file *filp, - char __user *buf, - size_t size, - loff_t *f_pos) -{ - struct bcm_tarang_data *tarang = filp->private_data; - struct bcm_mini_adapter *ad = tarang->Adapter; - struct sk_buff *packet = NULL; - ssize_t pkt_len = 0; - int wait_ret_val = 0; - unsigned long ret = 0; - - wait_ret_val = wait_event_interruptible( - ad->process_read_wait_queue, - (tarang->RxAppControlHead || - ad->device_removed)); - - if ((wait_ret_val == -ERESTARTSYS)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Exiting as i've been asked to exit!!!\n"); - return 
wait_ret_val; - } - - if (ad->device_removed) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Device Removed... Killing the Apps...\n"); - return -ENODEV; - } - - if (false == ad->fw_download_done) - return -EACCES; - - down(&ad->RxAppControlQueuelock); - - if (tarang->RxAppControlHead) { - packet = tarang->RxAppControlHead; - DEQUEUEPACKET(tarang->RxAppControlHead, - tarang->RxAppControlTail); - tarang->AppCtrlQueueLen--; - } - - up(&ad->RxAppControlQueuelock); - - if (packet) { - pkt_len = packet->len; - ret = copy_to_user(buf, packet->data, - min_t(size_t, pkt_len, size)); - if (ret) { - dev_kfree_skb(packet); - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Returning from copy to user failure\n"); - return -EFAULT; - } - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Read %zd Bytes From Adapter packet = %p by process %d!\n", - pkt_len, packet, current->pid); - dev_kfree_skb(packet); - } - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "<\n"); - return pkt_len; -} - -static int bcm_char_ioctl_reg_read_private(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_rdm_buffer rdm_buff = {0}; - struct bcm_ioctl_buffer io_buff; - PCHAR temp_buff; - INT status = STATUS_FAILURE; - UINT buff_len; - u16 temp_value; - int bytes; - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(rdm_buff)) - return -EINVAL; - - if (copy_from_user(&rdm_buff, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - if (io_buff.OutputLength > USHRT_MAX || - io_buff.OutputLength == 0) { - return -EINVAL; - } - - buff_len = io_buff.OutputLength; - temp_value = 4 - (buff_len % 4); - buff_len += temp_value % 4; - - temp_buff = kmalloc(buff_len, GFP_KERNEL); - if (!temp_buff) - return -ENOMEM; - - bytes = rdmalt(ad, (UINT)rdm_buff.Register, - (PUINT)temp_buff, buff_len); - if (bytes > 0) { - status = STATUS_SUCCESS; - if (copy_to_user(io_buff.OutputBuffer, temp_buff, bytes)) { - kfree(temp_buff); - return -EFAULT; - } - } else { - status = bytes; - } - - kfree(temp_buff); - return status; -} - -static int bcm_char_ioctl_reg_write_private(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_wrm_buffer wrm_buff = {0}; - struct bcm_ioctl_buffer io_buff; - UINT tmp = 0; - INT status; - - /* Copy Ioctl Buffer structure */ - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(wrm_buff)) - return -EINVAL; - - /* Get WrmBuffer structure */ - if (copy_from_user(&wrm_buff, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - tmp = wrm_buff.Register & EEPROM_REJECT_MASK; - if (!((ad->pstargetparams->m_u32Customize) & VSG_MODE) && - ((tmp == EEPROM_REJECT_REG_1) || - (tmp == EEPROM_REJECT_REG_2) || - (tmp == EEPROM_REJECT_REG_3) || - (tmp == EEPROM_REJECT_REG_4))) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "EEPROM Access Denied, not in VSG Mode\n"); - return -EFAULT; - } - - status = wrmalt(ad, (UINT)wrm_buff.Register, - (PUINT)wrm_buff.Data, sizeof(ULONG)); - - if (status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, "WRM Done\n"); - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, "WRM Failed\n"); - status = -EFAULT; - } - return status; -} - -static int bcm_char_ioctl_eeprom_reg_read(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_rdm_buffer rdm_buff = {0}; - 
struct bcm_ioctl_buffer io_buff; - PCHAR temp_buff = NULL; - UINT tmp = 0; - INT status; - int bytes; - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Device in Idle Mode, Blocking Rdms\n"); - return -EACCES; - } - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(rdm_buff)) - return -EINVAL; - - if (copy_from_user(&rdm_buff, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - if (io_buff.OutputLength > USHRT_MAX || - io_buff.OutputLength == 0) { - return -EINVAL; - } - - temp_buff = kmalloc(io_buff.OutputLength, GFP_KERNEL); - if (!temp_buff) - return STATUS_FAILURE; - - if ((((ULONG)rdm_buff.Register & 0x0F000000) != 0x0F000000) || - ((ULONG)rdm_buff.Register & 0x3)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "RDM Done On invalid Address : %x Access Denied.\n", - (int)rdm_buff.Register); - - kfree(temp_buff); - return -EINVAL; - } - - tmp = rdm_buff.Register & EEPROM_REJECT_MASK; - bytes = rdmaltWithLock(ad, (UINT)rdm_buff.Register, - (PUINT)temp_buff, io_buff.OutputLength); - - if (bytes > 0) { - status = STATUS_SUCCESS; - if (copy_to_user(io_buff.OutputBuffer, temp_buff, bytes)) { - kfree(temp_buff); - return -EFAULT; - } - } else { - status = bytes; - } - - kfree(temp_buff); - return status; -} - -static int bcm_char_ioctl_eeprom_reg_write(void __user *argp, - struct bcm_mini_adapter *ad, - UINT cmd) -{ - struct bcm_wrm_buffer wrm_buff = {0}; - struct bcm_ioctl_buffer io_buff; - UINT tmp = 0; - INT status; - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Device in Idle Mode, Blocking Wrms\n"); - return -EACCES; - } - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(wrm_buff)) - return -EINVAL; - - /* Get WrmBuffer structure */ - if (copy_from_user(&wrm_buff, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - if ((((ULONG)wrm_buff.Register & 0x0F000000) != 0x0F000000) || - ((ULONG)wrm_buff.Register & 0x3)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "WRM Done On invalid Address : %x Access Denied.\n", - (int)wrm_buff.Register); - return -EINVAL; - } - - tmp = wrm_buff.Register & EEPROM_REJECT_MASK; - if (!((ad->pstargetparams->m_u32Customize) & VSG_MODE) && - ((tmp == EEPROM_REJECT_REG_1) || - (tmp == EEPROM_REJECT_REG_2) || - (tmp == EEPROM_REJECT_REG_3) || - (tmp == EEPROM_REJECT_REG_4)) && - (cmd == IOCTL_BCM_REGISTER_WRITE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "EEPROM Access Denied, not in VSG Mode\n"); - return -EFAULT; - } - - status = wrmaltWithLock(ad, (UINT)wrm_buff.Register, - (PUINT)wrm_buff.Data, - wrm_buff.Length); - - if (status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, OSAL_DBG, - DBG_LVL_ALL, "WRM Done\n"); - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, "WRM Failed\n"); - status = -EFAULT; - } - return status; -} - -static int bcm_char_ioctl_gpio_set_request(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_gpio_info gpio_info = {0}; - struct bcm_ioctl_buffer io_buff; - UCHAR reset_val[4]; - UINT value = 0; - UINT bit = 0; - UINT operation = 0; - INT status; - int bytes; - - if ((ad->IdleMode == TRUE) || - 
(ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, - "GPIO Can't be set/clear in Low power Mode"); - return -EACCES; - } - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(gpio_info)) - return -EINVAL; - - if (copy_from_user(&gpio_info, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - bit = gpio_info.uiGpioNumber; - operation = gpio_info.uiGpioValue; - value = (1< is not correspond to LED !!!", - value); - return -EINVAL; - } - - /* Set - setting 1 */ - if (operation) { - /* Set the gpio output register */ - status = wrmaltWithLock(ad, - BCM_GPIO_OUTPUT_SET_REG, - (PUINT)(&value), sizeof(UINT)); - - if (status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, - OSAL_DBG, DBG_LVL_ALL, - "Set the GPIO bit\n"); - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, - OSAL_DBG, DBG_LVL_ALL, - "Failed to set the %dth GPIO\n", - bit); - return status; - } - } else { - /* Set the gpio output register */ - status = wrmaltWithLock(ad, - BCM_GPIO_OUTPUT_CLR_REG, - (PUINT)(&value), sizeof(UINT)); - - if (status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, - OSAL_DBG, DBG_LVL_ALL, - "Set the GPIO bit\n"); - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, - OSAL_DBG, DBG_LVL_ALL, - "Failed to clear the %dth GPIO\n", - bit); - return status; - } - } - - bytes = rdmaltWithLock(ad, (UINT)GPIO_MODE_REGISTER, - (PUINT)reset_val, sizeof(UINT)); - if (bytes < 0) { - status = bytes; - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "GPIO_MODE_REGISTER read failed"); - return status; - } - status = STATUS_SUCCESS; - - /* Set the gpio mode register to output */ - *(UINT *)reset_val |= (1<IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, - "GPIO Can't be set/clear in Low power Mode"); - return -EACCES; - } - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(thread_req)) - return -EINVAL; - - if (copy_from_user(&thread_req, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - /* if LED thread is running(Actively or Inactively) - * set it state to make inactive - */ - if (ad->LEDInfo.led_thread_running) { - if (thread_req.ThreadState == LED_THREAD_ACTIVATION_REQ) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, - OSAL_DBG, DBG_LVL_ALL, - "Activating thread req"); - ad->DriverState = LED_THREAD_ACTIVE; - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, - OSAL_DBG, DBG_LVL_ALL, - "DeActivating Thread req....."); - ad->DriverState = LED_THREAD_INACTIVE; - } - - /* signal thread. 
*/ - wake_up(&ad->LEDInfo.notify_led_event); - } - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_gpio_status_request(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_gpio_info gpio_info = {0}; - struct bcm_ioctl_buffer io_buff; - ULONG bit = 0; - UCHAR read[4]; - INT status; - int bytes; - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) - return -EACCES; - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(gpio_info)) - return -EINVAL; - - if (copy_from_user(&gpio_info, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - bit = gpio_info.uiGpioNumber; - - /* Set the gpio output register */ - bytes = rdmaltWithLock(ad, (UINT)GPIO_PIN_STATE_REGISTER, - (PUINT)read, sizeof(UINT)); - - if (bytes < 0) { - status = bytes; - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "RDM Failed\n"); - return status; - } - status = STATUS_SUCCESS; - return status; -} - -static int bcm_char_ioctl_gpio_multi_request(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_gpio_multi_info gpio_multi_info[MAX_IDX]; - struct bcm_gpio_multi_info *pgpio_multi_info = - (struct bcm_gpio_multi_info *)gpio_multi_info; - struct bcm_ioctl_buffer io_buff; - UCHAR reset_val[4]; - INT status = STATUS_FAILURE; - int bytes; - - memset(pgpio_multi_info, 0, - MAX_IDX * sizeof(struct bcm_gpio_multi_info)); - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) - return -EINVAL; - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(gpio_multi_info)) - return -EINVAL; - if (io_buff.OutputLength > sizeof(gpio_multi_info)) - io_buff.OutputLength = sizeof(gpio_multi_info); - - if (copy_from_user(&gpio_multi_info, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - if (IsReqGpioIsLedInNVM(ad, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) - == false) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, - "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!", - pgpio_multi_info[WIMAX_IDX].uiGPIOMask, - ad->gpioBitMap); - return -EINVAL; - } - - /* Set the gpio output register */ - if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) & - (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) { - /* Set 1's in GPIO OUTPUT REGISTER */ - *(UINT *)reset_val = pgpio_multi_info[WIMAX_IDX].uiGPIOMask & - pgpio_multi_info[WIMAX_IDX].uiGPIOCommand & - pgpio_multi_info[WIMAX_IDX].uiGPIOValue; - - if (*(UINT *) reset_val) - status = wrmaltWithLock(ad, - BCM_GPIO_OUTPUT_SET_REG, - (PUINT)reset_val, sizeof(ULONG)); - - if (status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "WRM to BCM_GPIO_OUTPUT_SET_REG Failed."); - return status; - } - - /* Clear to 0's in GPIO OUTPUT REGISTER */ - *(UINT *)reset_val = - (pgpio_multi_info[WIMAX_IDX].uiGPIOMask & - pgpio_multi_info[WIMAX_IDX].uiGPIOCommand & - (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue))); - - if (*(UINT *) reset_val) - status = wrmaltWithLock(ad, - BCM_GPIO_OUTPUT_CLR_REG, (PUINT)reset_val, - sizeof(ULONG)); - - if (status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed."); - return status; - } - } - - if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) { - bytes = rdmaltWithLock(ad, (UINT)GPIO_PIN_STATE_REGISTER, - (PUINT)reset_val, sizeof(UINT)); - - if (bytes < 0) { - status = bytes; - 
BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "RDM to GPIO_PIN_STATE_REGISTER Failed."); - return status; - } - status = STATUS_SUCCESS; - - pgpio_multi_info[WIMAX_IDX].uiGPIOValue = - (*(UINT *)reset_val & - pgpio_multi_info[WIMAX_IDX].uiGPIOMask); - } - - status = copy_to_user(io_buff.OutputBuffer, &gpio_multi_info, - io_buff.OutputLength); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Failed while copying Content to IOBufer for user space err:%d", - status); - return -EFAULT; - } - return status; -} - -static int bcm_char_ioctl_gpio_mode_request(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_gpio_multi_mode gpio_multi_mode[MAX_IDX]; - struct bcm_gpio_multi_mode *pgpio_multi_mode = - (struct bcm_gpio_multi_mode *)gpio_multi_mode; - struct bcm_ioctl_buffer io_buff; - UCHAR reset_val[4]; - INT status; - int bytes; - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) - return -EINVAL; - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength > sizeof(gpio_multi_mode)) - return -EINVAL; - if (io_buff.OutputLength > sizeof(gpio_multi_mode)) - io_buff.OutputLength = sizeof(gpio_multi_mode); - - if (copy_from_user(&gpio_multi_mode, io_buff.InputBuffer, - io_buff.InputLength)) - return -EFAULT; - - bytes = rdmaltWithLock(ad, (UINT)GPIO_MODE_REGISTER, - (PUINT)reset_val, sizeof(UINT)); - - if (bytes < 0) { - status = bytes; - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Read of GPIO_MODE_REGISTER failed"); - return status; - } - status = STATUS_SUCCESS; - - /* Validating the request */ - if (IsReqGpioIsLedInNVM(ad, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) - == false) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!", - pgpio_multi_mode[WIMAX_IDX].uiGPIOMask, - ad->gpioBitMap); - return -EINVAL; - } - - if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) { - /* write all OUT's (1's) */ - *(UINT *) reset_val |= - (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode & - pgpio_multi_mode[WIMAX_IDX].uiGPIOMask); - - /* write all IN's (0's) */ - *(UINT *) reset_val &= - ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) & - pgpio_multi_mode[WIMAX_IDX].uiGPIOMask); - - /* Currently implemented return the modes of all GPIO's - * else needs to bit AND with mask - */ - pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)reset_val; - - status = wrmaltWithLock(ad, GPIO_MODE_REGISTER, - (PUINT)reset_val, sizeof(ULONG)); - if (status == STATUS_SUCCESS) { - BCM_DEBUG_PRINT(ad, - DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "WRM to GPIO_MODE_REGISTER Done"); - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "WRM to GPIO_MODE_REGISTER Failed"); - return -EFAULT; - } - } else { - /* if uiGPIOMask is 0 then return mode register configuration */ - pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)reset_val; - } - - status = copy_to_user(io_buff.OutputBuffer, &gpio_multi_mode, - io_buff.OutputLength); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Failed while copying Content to IOBufer for user space err:%d", - status); - return -EFAULT; - } - return status; -} - -static int bcm_char_ioctl_misc_request(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_ioctl_buffer io_buff; - PVOID buff = NULL; - INT status; - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength < 
sizeof(struct bcm_link_request)) - return -EINVAL; - - if (io_buff.InputLength > MAX_CNTL_PKT_SIZE) - return -EINVAL; - - buff = memdup_user(io_buff.InputBuffer, - io_buff.InputLength); - if (IS_ERR(buff)) - return PTR_ERR(buff); - - down(&ad->LowPowerModeSync); - status = wait_event_interruptible_timeout( - ad->lowpower_mode_wait_queue, - !ad->bPreparingForLowPowerMode, - (1 * HZ)); - - if (status == -ERESTARTSYS) - goto cntrlEnd; - - if (ad->bPreparingForLowPowerMode) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Preparing Idle Mode is still True - Hence Rejecting control message\n"); - status = STATUS_FAILURE; - goto cntrlEnd; - } - status = CopyBufferToControlPacket(ad, (PVOID)buff); - -cntrlEnd: - up(&ad->LowPowerModeSync); - kfree(buff); - return status; -} - -static int bcm_char_ioctl_buffer_download_start( - struct bcm_mini_adapter *ad) -{ - INT status; - - if (down_trylock(&ad->NVMRdmWrmLock)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n"); - return -EACCES; - } - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Starting the firmware download PID =0x%x!!!!\n", - current->pid); - - if (down_trylock(&ad->fw_download_sema)) - return -EBUSY; - - ad->bBinDownloaded = false; - ad->fw_download_process_pid = current->pid; - ad->bCfgDownloaded = false; - ad->fw_download_done = false; - netif_carrier_off(ad->dev); - netif_stop_queue(ad->dev); - status = reset_card_proc(ad); - if (status) { - pr_err(PFX "%s: reset_card_proc Failed!\n", ad->dev->name); - up(&ad->fw_download_sema); - up(&ad->NVMRdmWrmLock); - return status; - } - mdelay(10); - - up(&ad->NVMRdmWrmLock); - return status; -} - -static int bcm_char_ioctl_buffer_download(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_firmware_info *fw_info = NULL; - struct bcm_ioctl_buffer io_buff; - INT status; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Starting the firmware download PID =0x%x!!!!\n", current->pid); - - if (!down_trylock(&ad->fw_download_sema)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Invalid way to download buffer. 
Use Start and then call this!!!\n"); - up(&ad->fw_download_sema); - return -EINVAL; - } - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) { - up(&ad->fw_download_sema); - return -EFAULT; - } - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Length for FW DLD is : %lx\n", io_buff.InputLength); - - if (io_buff.InputLength > sizeof(struct bcm_firmware_info)) { - up(&ad->fw_download_sema); - return -EINVAL; - } - - fw_info = kmalloc(sizeof(*fw_info), GFP_KERNEL); - if (!fw_info) { - up(&ad->fw_download_sema); - return -ENOMEM; - } - - if (copy_from_user(fw_info, io_buff.InputBuffer, - io_buff.InputLength)) { - up(&ad->fw_download_sema); - kfree(fw_info); - return -EFAULT; - } - - if (!fw_info->pvMappedFirmwareAddress || - (fw_info->u32FirmwareLength == 0)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Something else is wrong %lu\n", - fw_info->u32FirmwareLength); - up(&ad->fw_download_sema); - kfree(fw_info); - status = -EINVAL; - return status; - } - - status = bcm_ioctl_fw_download(ad, fw_info); - - if (status != STATUS_SUCCESS) { - if (fw_info->u32StartingAddress == CONFIG_BEGIN_ADDR) - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "IOCTL: Configuration File Upload Failed\n"); - else - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "IOCTL: Firmware File Upload Failed\n"); - - /* up(&ad->fw_download_sema); */ - - if (ad->LEDInfo.led_thread_running & - BCM_LED_THREAD_RUNNING_ACTIVELY) { - ad->DriverState = DRIVER_INIT; - ad->LEDInfo.bLedInitDone = false; - wake_up(&ad->LEDInfo.notify_led_event); - } - } - - if (status != STATUS_SUCCESS) - up(&ad->fw_download_sema); - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, - "IOCTL: Firmware File Uploaded\n"); - kfree(fw_info); - return status; -} - -static int bcm_char_ioctl_buffer_download_stop(void __user *argp, - struct bcm_mini_adapter *ad) -{ - INT status; - int timeout = 0; - - if (!down_trylock(&ad->fw_download_sema)) { - up(&ad->fw_download_sema); - return -EINVAL; - } - - if (down_trylock(&ad->NVMRdmWrmLock)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "FW download blocked as EEPROM Read/Write is in progress\n"); - up(&ad->fw_download_sema); - return -EACCES; - } - - ad->bBinDownloaded = TRUE; - ad->bCfgDownloaded = TRUE; - atomic_set(&ad->CurrNumFreeTxDesc, 0); - ad->CurrNumRecvDescs = 0; - ad->downloadDDR = 0; - - /* setting the Mips to Run */ - status = run_card_proc(ad); - - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Firm Download Failed\n"); - up(&ad->fw_download_sema); - up(&ad->NVMRdmWrmLock); - return status; - } - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, "Firm Download Over...\n"); - - mdelay(10); - - /* Wait for MailBox Interrupt */ - if (StartInterruptUrb((struct bcm_interface_adapter *)ad->pvInterfaceAdapter)) - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Unable to send interrupt...\n"); - - timeout = 5*HZ; - ad->waiting_to_fw_download_done = false; - wait_event_timeout(ad->ioctl_fw_dnld_wait_queue, - ad->waiting_to_fw_download_done, timeout); - ad->fw_download_process_pid = INVALID_PID; - ad->fw_download_done = TRUE; - atomic_set(&ad->CurrNumFreeTxDesc, 0); - ad->CurrNumRecvDescs = 0; - ad->PrevNumRecvDescs = 0; - atomic_set(&ad->cntrlpktCnt, 0); - ad->LinkUpStatus = 0; - ad->LinkStatus = 0; - - if (ad->LEDInfo.led_thread_running & - BCM_LED_THREAD_RUNNING_ACTIVELY) { - ad->DriverState = FW_DOWNLOAD_DONE; - wake_up(&ad->LEDInfo.notify_led_event); - } - - if (!timeout) - status = -ENODEV; - - 
up(&ad->fw_download_sema); - up(&ad->NVMRdmWrmLock); - return status; -} - -static int bcm_char_ioctl_chip_reset(struct bcm_mini_adapter *ad) -{ - INT status; - INT nvm_access; - - nvm_access = down_trylock(&ad->NVMRdmWrmLock); - if (nvm_access) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n"); - return -EACCES; - } - - down(&ad->RxAppControlQueuelock); - status = reset_card_proc(ad); - flushAllAppQ(); - up(&ad->RxAppControlQueuelock); - up(&ad->NVMRdmWrmLock); - ResetCounters(ad); - return status; -} - -static int bcm_char_ioctl_qos_threshold(ULONG arg, - struct bcm_mini_adapter *ad) -{ - USHORT i; - - for (i = 0; i < NO_OF_QUEUES; i++) { - if (get_user(ad->PackInfo[i].uiThreshold, - (unsigned long __user *)arg)) { - return -EFAULT; - } - } - return 0; -} - -static int bcm_char_ioctl_switch_transfer_mode(void __user *argp, - struct bcm_mini_adapter *ad) -{ - UINT data = 0; - - if (copy_from_user(&data, argp, sizeof(UINT))) - return -EFAULT; - - if (data) { - /* Allow All Packets */ - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n"); - ad->TransferMode = ETH_PACKET_TUNNELING_MODE; - } else { - /* Allow IP only Packets */ - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n"); - ad->TransferMode = IP_PACKET_ONLY_MODE; - } - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_get_driver_version(void __user *argp) -{ - struct bcm_ioctl_buffer io_buff; - ulong len; - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - len = min_t(ulong, io_buff.OutputLength, strlen(DRV_VERSION) + 1); - - if (copy_to_user(io_buff.OutputBuffer, DRV_VERSION, len)) - return -EFAULT; - - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_get_current_status(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_link_state link_state; - struct bcm_ioctl_buffer io_buff; - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "copy_from_user failed..\n"); - return -EFAULT; - } - - if (io_buff.OutputLength != sizeof(link_state)) - return -EINVAL; - - memset(&link_state, 0, sizeof(link_state)); - link_state.bIdleMode = ad->IdleMode; - link_state.bShutdownMode = ad->bShutStatus; - link_state.ucLinkStatus = ad->LinkStatus; - - if (copy_to_user(io_buff.OutputBuffer, &link_state, min_t(size_t, - sizeof(link_state), io_buff.OutputLength))) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy_to_user Failed..\n"); - return -EFAULT; - } - return STATUS_SUCCESS; -} - - -static int bcm_char_ioctl_set_mac_tracing(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_ioctl_buffer io_buff; - UINT tracing_flag; - - /* copy ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (copy_from_user(&tracing_flag, io_buff.InputBuffer, sizeof(UINT))) - return -EFAULT; - - if (tracing_flag) - ad->pTarangs->MacTracingEnabled = TRUE; - else - ad->pTarangs->MacTracingEnabled = false; - - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_get_dsx_indication(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_ioctl_buffer io_buff; - ULONG sf_id = 0; - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - 
if (io_buff.OutputLength < sizeof(struct bcm_add_indication_alt)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Mismatch req: %lx needed is =0x%zx!!!", - io_buff.OutputLength, - sizeof(struct bcm_add_indication_alt)); - return -EINVAL; - } - - if (copy_from_user(&sf_id, io_buff.InputBuffer, sizeof(sf_id))) - return -EFAULT; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Get DSX Data SF ID is =%lx\n", sf_id); - get_dsx_sf_data_to_application(ad, sf_id, io_buff.OutputBuffer); - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_get_host_mibs(void __user *argp, - struct bcm_mini_adapter *ad, - struct bcm_tarang_data *tarang) -{ - struct bcm_ioctl_buffer io_buff; - INT status = STATUS_FAILURE; - PVOID temp_buff; - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.OutputLength != sizeof(struct bcm_host_stats_mibs)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Length Check failed %lu %zd\n", io_buff.OutputLength, - sizeof(struct bcm_host_stats_mibs)); - return -EINVAL; - } - - /* FIXME: HOST_STATS are too big for kmalloc (122048)! */ - temp_buff = kzalloc(sizeof(struct bcm_host_stats_mibs), GFP_KERNEL); - if (!temp_buff) - return STATUS_FAILURE; - - status = ProcessGetHostMibs(ad, temp_buff); - GetDroppedAppCntrlPktMibs(temp_buff, tarang); - - if (status != STATUS_FAILURE) { - if (copy_to_user(io_buff.OutputBuffer, temp_buff, - sizeof(struct bcm_host_stats_mibs))) { - kfree(temp_buff); - return -EFAULT; - } - } - - kfree(temp_buff); - return status; -} - -static int bcm_char_ioctl_bulk_wrm(void __user *argp, - struct bcm_mini_adapter *ad, UINT cmd) -{ - struct bcm_bulk_wrm_buffer *bulk_buff; - struct bcm_ioctl_buffer io_buff; - UINT tmp = 0; - INT status = STATUS_FAILURE; - PCHAR buff = NULL; - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0, - "Device in Idle/Shutdown Mode, Blocking Wrms\n"); - return -EACCES; - } - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.InputLength < sizeof(ULONG) * 2) - return -EINVAL; - - buff = memdup_user(io_buff.InputBuffer, - io_buff.InputLength); - if (IS_ERR(buff)) - return PTR_ERR(buff); - - bulk_buff = (struct bcm_bulk_wrm_buffer *)buff; - - if (((ULONG)bulk_buff->Register & 0x0F000000) != 0x0F000000 || - ((ULONG)bulk_buff->Register & 0x3)) { - BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0, - "WRM Done On invalid Address : %x Access Denied.\n", - (int)bulk_buff->Register); - kfree(buff); - return -EINVAL; - } - - tmp = bulk_buff->Register & EEPROM_REJECT_MASK; - if (!((ad->pstargetparams->m_u32Customize)&VSG_MODE) && - ((tmp == EEPROM_REJECT_REG_1) || - (tmp == EEPROM_REJECT_REG_2) || - (tmp == EEPROM_REJECT_REG_3) || - (tmp == EEPROM_REJECT_REG_4)) && - (cmd == IOCTL_BCM_REGISTER_WRITE)) { - - kfree(buff); - BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0, - "EEPROM Access Denied, not in VSG Mode\n"); - return -EFAULT; - } - - if (bulk_buff->SwapEndian == false) - status = wrmWithLock(ad, (UINT)bulk_buff->Register, - (PCHAR)bulk_buff->Values, - io_buff.InputLength - 2*sizeof(ULONG)); - else - status = wrmaltWithLock(ad, (UINT)bulk_buff->Register, - (PUINT)bulk_buff->Values, - io_buff.InputLength - 2*sizeof(ULONG)); - - if (status != STATUS_SUCCESS) - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n"); - - kfree(buff); - return status; -} - -static int 
bcm_char_ioctl_get_nvm_size(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_ioctl_buffer io_buff; - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (ad->eNVMType == NVM_EEPROM || ad->eNVMType == NVM_FLASH) { - if (copy_to_user(io_buff.OutputBuffer, &ad->uiNVMDSDSize, - sizeof(UINT))) - return -EFAULT; - } - - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_cal_init(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_ioctl_buffer io_buff; - UINT sector_size = 0; - INT status = STATUS_FAILURE; - - if (ad->eNVMType == NVM_FLASH) { - if (copy_from_user(&io_buff, argp, - sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (copy_from_user(§or_size, io_buff.InputBuffer, - sizeof(UINT))) - return -EFAULT; - - if ((sector_size < MIN_SECTOR_SIZE) || - (sector_size > MAX_SECTOR_SIZE)) { - if (copy_to_user(io_buff.OutputBuffer, - &ad->uiSectorSize, sizeof(UINT))) - return -EFAULT; - } else { - if (IsFlash2x(ad)) { - if (copy_to_user(io_buff.OutputBuffer, - &ad->uiSectorSize, sizeof(UINT))) - return -EFAULT; - } else { - if ((TRUE == ad->bShutStatus) || - (TRUE == ad->IdleMode)) { - BCM_DEBUG_PRINT(ad, - DBG_TYPE_PRINTK, 0, 0, - "Device is in Idle/Shutdown Mode\n"); - return -EACCES; - } - - ad->uiSectorSize = sector_size; - BcmUpdateSectorSize(ad, - ad->uiSectorSize); - } - } - status = STATUS_SUCCESS; - } else { - status = STATUS_FAILURE; - } - return status; -} - -static int bcm_char_ioctl_set_debug(void __user *argp, - struct bcm_mini_adapter *ad) -{ -#ifdef DEBUG - struct bcm_ioctl_buffer io_buff; - struct bcm_user_debug_state user_debug_state; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "In SET_DEBUG ioctl\n"); - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (copy_from_user(&user_debug_state, io_buff.InputBuffer, - sizeof(struct bcm_user_debug_state))) - return -EFAULT; - - BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0, - "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ", - user_debug_state.OnOff, user_debug_state.Type); - /* user_debug_state.Subtype <<= 1; */ - user_debug_state.Subtype = 1 << user_debug_state.Subtype; - BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0, - "actual Subtype=0x%x\n", user_debug_state.Subtype); - - /* Update new 'DebugState' in the ad */ - ad->stDebugState.type |= user_debug_state.Type; - /* Subtype: A bitmap of 32 bits for Subtype per Type. - * Valid indexes in 'subtype' array: 1,2,4,8 - * corresponding to valid Type values. Hence we can use the 'Type' field - * as the index value, ignoring the array entries 0,3,5,6,7 ! - */ - if (user_debug_state.OnOff) - ad->stDebugState.subtype[user_debug_state.Type] |= - user_debug_state.Subtype; - else - ad->stDebugState.subtype[user_debug_state.Type] &= - ~user_debug_state.Subtype; - - BCM_SHOW_DEBUG_BITMAP(ad); -#endif - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_nvm_rw(void __user *argp, - struct bcm_mini_adapter *ad, UINT cmd) -{ - struct bcm_nvm_readwrite nvm_rw; - struct timeval tv0, tv1; - struct bcm_ioctl_buffer io_buff; - PUCHAR read_data = NULL; - INT status = STATUS_FAILURE; - - memset(&tv0, 0, sizeof(struct timeval)); - memset(&tv1, 0, sizeof(struct timeval)); - if ((ad->eNVMType == NVM_FLASH) && - (ad->uiFlashLayoutMajorVersion == 0)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "The Flash Control Section is Corrupted. 
Hence Rejection on NVM Read/Write\n"); - return -EFAULT; - } - - if (IsFlash2x(ad)) { - if ((ad->eActiveDSD != DSD0) && - (ad->eActiveDSD != DSD1) && - (ad->eActiveDSD != DSD2)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "No DSD is active..hence NVM Command is blocked"); - return STATUS_FAILURE; - } - } - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (copy_from_user(&nvm_rw, - (IOCTL_BCM_NVM_READ == cmd) ? - io_buff.OutputBuffer : io_buff.InputBuffer, - sizeof(struct bcm_nvm_readwrite))) - return -EFAULT; - - /* - * Deny the access if the offset crosses the cal area limit. - */ - if (nvm_rw.uiNumBytes > ad->uiNVMDSDSize) - return STATUS_FAILURE; - - if (nvm_rw.uiOffset > - ad->uiNVMDSDSize - nvm_rw.uiNumBytes) - return STATUS_FAILURE; - - read_data = memdup_user(nvm_rw.pBuffer, - nvm_rw.uiNumBytes); - if (IS_ERR(read_data)) - return PTR_ERR(read_data); - - do_gettimeofday(&tv0); - if (IOCTL_BCM_NVM_READ == cmd) { - int ret = bcm_handle_nvm_read_cmd(ad, read_data, - &nvm_rw); - if (ret != STATUS_SUCCESS) - return ret; - } else { - down(&ad->NVMRdmWrmLock); - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, - DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Device is in Idle/Shutdown Mode\n"); - up(&ad->NVMRdmWrmLock); - kfree(read_data); - return -EACCES; - } - - ad->bHeaderChangeAllowed = TRUE; - if (IsFlash2x(ad)) { - int ret = handle_flash2x_adapter(ad, - read_data, - &nvm_rw); - if (ret != STATUS_SUCCESS) - return ret; - } - - status = BeceemNVMWrite(ad, (PUINT)read_data, - nvm_rw.uiOffset, nvm_rw.uiNumBytes, - nvm_rw.bVerify); - if (IsFlash2x(ad)) - BcmFlash2xWriteSig(ad, ad->eActiveDSD); - - ad->bHeaderChangeAllowed = false; - - up(&ad->NVMRdmWrmLock); - - if (status != STATUS_SUCCESS) { - kfree(read_data); - return status; - } - } - - do_gettimeofday(&tv1); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - " timetaken by Write/read :%ld msec\n", - (tv1.tv_sec - tv0.tv_sec)*1000 + - (tv1.tv_usec - tv0.tv_usec)/1000); - - kfree(read_data); - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_flash2x_section_read(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_flash2x_readwrite flash_2x_read = {0}; - struct bcm_ioctl_buffer io_buff; - PUCHAR read_buff = NULL; - UINT nob = 0; - UINT buff_size = 0; - UINT read_bytes = 0; - UINT read_offset = 0; - INT status = STATUS_FAILURE; - void __user *OutPutBuff; - - if (IsFlash2x(ad) != TRUE) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Flash Does not have 2.x map"); - return -EINVAL; - } - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called"); - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - /* Reading FLASH 2.x READ structure */ - if (copy_from_user(&flash_2x_read, io_buff.InputBuffer, - sizeof(struct bcm_flash2x_readwrite))) - return -EFAULT; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\nflash_2x_read.Section :%x", - flash_2x_read.Section); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\nflash_2x_read.offset :%x", - flash_2x_read.offset); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\nflash_2x_read.numOfBytes :%x", - flash_2x_read.numOfBytes); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\nflash_2x_read.bVerify :%x\n", - flash_2x_read.bVerify); - - /* This was 
internal to driver for raw read. - * now it has ben exposed to user space app. - */ - if (validateFlash2xReadWrite(ad, &flash_2x_read) == false) - return STATUS_FAILURE; - - nob = flash_2x_read.numOfBytes; - if (nob > ad->uiSectorSize) - buff_size = ad->uiSectorSize; - else - buff_size = nob; - - read_offset = flash_2x_read.offset; - OutPutBuff = io_buff.OutputBuffer; - read_buff = kzalloc(buff_size , GFP_KERNEL); - - if (read_buff == NULL) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Memory allocation failed for Flash 2.x Read Structure"); - return -ENOMEM; - } - down(&ad->NVMRdmWrmLock); - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, - "Device is in Idle/Shutdown Mode\n"); - up(&ad->NVMRdmWrmLock); - kfree(read_buff); - return -EACCES; - } - - while (nob) { - if (nob > ad->uiSectorSize) - read_bytes = ad->uiSectorSize; - else - read_bytes = nob; - - /* Reading the data from Flash 2.x */ - status = BcmFlash2xBulkRead(ad, (PUINT)read_buff, - flash_2x_read.Section, read_offset, read_bytes); - if (status) { - BCM_DEBUG_PRINT(ad, - DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Flash 2x read err with status :%d", - status); - break; - } - - BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, read_buff, read_bytes); - - status = copy_to_user(OutPutBuff, read_buff, read_bytes); - if (status) { - BCM_DEBUG_PRINT(ad, - DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Copy to use failed with status :%d", status); - up(&ad->NVMRdmWrmLock); - kfree(read_buff); - return -EFAULT; - } - nob = nob - read_bytes; - if (nob) { - read_offset = read_offset + read_bytes; - OutPutBuff = OutPutBuff + read_bytes; - } - } - - up(&ad->NVMRdmWrmLock); - kfree(read_buff); - return status; -} - -static int bcm_char_ioctl_flash2x_section_write(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_flash2x_readwrite sFlash2xWrite = {0}; - struct bcm_ioctl_buffer io_buff; - PUCHAR write_buff; - void __user *input_addr; - UINT nob = 0; - UINT buff_size = 0; - UINT write_off = 0; - UINT write_bytes = 0; - INT status = STATUS_FAILURE; - - if (IsFlash2x(ad) != TRUE) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Flash Does not have 2.x map"); - return -EINVAL; - } - - /* First make this False so that we can enable the Sector - * Permission Check in BeceemFlashBulkWrite - */ - ad->bAllDSDWriteAllow = false; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_FLASH2X_SECTION_WRITE Called"); - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - /* Reading FLASH 2.x READ structure */ - if (copy_from_user(&sFlash2xWrite, io_buff.InputBuffer, - sizeof(struct bcm_flash2x_readwrite))) - return -EFAULT; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\nsFlash2xWrite.Section :%x", sFlash2xWrite.Section); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\nsFlash2xWrite.offset :%d", sFlash2xWrite.offset); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\nsFlash2xWrite.numOfBytes :%x", sFlash2xWrite.numOfBytes); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\nsFlash2xWrite.bVerify :%x\n", sFlash2xWrite.bVerify); - - if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) - && (sFlash2xWrite.Section != VSA2)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Only VSA write is allowed"); - return -EINVAL; - } - - if 
(validateFlash2xReadWrite(ad, &sFlash2xWrite) == false) - return STATUS_FAILURE; - - input_addr = sFlash2xWrite.pDataBuff; - write_off = sFlash2xWrite.offset; - nob = sFlash2xWrite.numOfBytes; - - if (nob > ad->uiSectorSize) - buff_size = ad->uiSectorSize; - else - buff_size = nob; - - write_buff = kmalloc(buff_size, GFP_KERNEL); - - if (write_buff == NULL) - return -ENOMEM; - - /* extracting the remainder of the given offset. */ - write_bytes = ad->uiSectorSize; - if (write_off % ad->uiSectorSize) { - write_bytes = ad->uiSectorSize - - (write_off % ad->uiSectorSize); - } - - if (nob < write_bytes) - write_bytes = nob; - - down(&ad->NVMRdmWrmLock); - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Device is in Idle/Shutdown Mode\n"); - up(&ad->NVMRdmWrmLock); - kfree(write_buff); - return -EACCES; - } - - BcmFlash2xCorruptSig(ad, sFlash2xWrite.Section); - do { - status = copy_from_user(write_buff, input_addr, write_bytes); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy to user failed with status :%d", status); - up(&ad->NVMRdmWrmLock); - kfree(write_buff); - return -EFAULT; - } - BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS, - OSAL_DBG, DBG_LVL_ALL, write_buff, write_bytes); - - /* Writing the data from Flash 2.x */ - status = BcmFlash2xBulkWrite(ad, (PUINT)write_buff, - sFlash2xWrite.Section, - write_off, - write_bytes, - sFlash2xWrite.bVerify); - - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Flash 2x read err with status :%d", status); - break; - } - - nob = nob - write_bytes; - if (nob) { - write_off = write_off + write_bytes; - input_addr = input_addr + write_bytes; - if (nob > ad->uiSectorSize) - write_bytes = ad->uiSectorSize; - else - write_bytes = nob; - } - } while (nob > 0); - - BcmFlash2xWriteSig(ad, sFlash2xWrite.Section); - up(&ad->NVMRdmWrmLock); - kfree(write_buff); - return status; -} - -static int bcm_char_ioctl_flash2x_section_bitmap(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_flash2x_bitmap *flash_2x_bit_map; - struct bcm_ioctl_buffer io_buff; - -BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called"); - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.OutputLength != sizeof(struct bcm_flash2x_bitmap)) - return -EINVAL; - - flash_2x_bit_map = kzalloc(sizeof(struct bcm_flash2x_bitmap), - GFP_KERNEL); - - if (flash_2x_bit_map == NULL) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Memory is not available"); - return -ENOMEM; - } - - /* Reading the Flash Sectio Bit map */ - down(&ad->NVMRdmWrmLock); - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Device is in Idle/Shutdown Mode\n"); - up(&ad->NVMRdmWrmLock); - kfree(flash_2x_bit_map); - return -EACCES; - } - - BcmGetFlash2xSectionalBitMap(ad, flash_2x_bit_map); - up(&ad->NVMRdmWrmLock); - if (copy_to_user(io_buff.OutputBuffer, flash_2x_bit_map, - sizeof(struct bcm_flash2x_bitmap))) { - kfree(flash_2x_bit_map); - return -EFAULT; - } - - kfree(flash_2x_bit_map); - return STATUS_FAILURE; -} - -static int bcm_char_ioctl_set_active_section(void __user *argp, - struct bcm_mini_adapter *ad) -{ - enum bcm_flash2x_section_val flash_2x_section_val = 0; - INT status = STATUS_FAILURE; - struct bcm_ioctl_buffer 
io_buff; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_SET_ACTIVE_SECTION Called"); - - if (IsFlash2x(ad) != TRUE) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Flash Does not have 2.x map"); - return -EINVAL; - } - - status = copy_from_user(&io_buff, argp, - sizeof(struct bcm_ioctl_buffer)); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy of IOCTL BUFFER failed"); - return -EFAULT; - } - - status = copy_from_user(&flash_2x_section_val, - io_buff.InputBuffer, sizeof(INT)); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy of flash section val failed"); - return -EFAULT; - } - - down(&ad->NVMRdmWrmLock); - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Device is in Idle/Shutdown Mode\n"); - up(&ad->NVMRdmWrmLock); - return -EACCES; - } - - status = BcmSetActiveSection(ad, flash_2x_section_val); - if (status) - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Failed to make it's priority Highest. status %d", - status); - - up(&ad->NVMRdmWrmLock); - - return status; -} - -static int bcm_char_ioctl_copy_section(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_flash2x_copy_section copy_sect_strut = {0}; - struct bcm_ioctl_buffer io_buff; - INT status = STATUS_SUCCESS; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_COPY_SECTION Called"); - - ad->bAllDSDWriteAllow = false; - if (IsFlash2x(ad) != TRUE) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Flash Does not have 2.x map"); - return -EINVAL; - } - - status = copy_from_user(&io_buff, argp, - sizeof(struct bcm_ioctl_buffer)); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy of IOCTL BUFFER failed status :%d", - status); - return -EFAULT; - } - - status = copy_from_user(©_sect_strut, io_buff.InputBuffer, - sizeof(struct bcm_flash2x_copy_section)); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy of Copy_Section_Struct failed with status :%d", - status); - return -EFAULT; - } - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Source SEction :%x", copy_sect_strut.SrcSection); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Destination SEction :%x", copy_sect_strut.DstSection); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "offset :%x", copy_sect_strut.offset); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "nob :%x", copy_sect_strut.numOfBytes); - - if (IsSectionExistInFlash(ad, copy_sect_strut.SrcSection) == false) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Source Section<%x> does not exist in Flash ", - copy_sect_strut.SrcSection); - return -EINVAL; - } - - if (IsSectionExistInFlash(ad, copy_sect_strut.DstSection) == false) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Destinatio Section<%x> does not exist in Flash ", - copy_sect_strut.DstSection); - return -EINVAL; - } - - if (copy_sect_strut.SrcSection == copy_sect_strut.DstSection) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Source and Destination section should be different"); - return -EINVAL; - } - - down(&ad->NVMRdmWrmLock); - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Device is in Idle/Shutdown Mode\n"); - up(&ad->NVMRdmWrmLock); - return -EACCES; - } - - if 
(copy_sect_strut.SrcSection == ISO_IMAGE1 || - copy_sect_strut.SrcSection == ISO_IMAGE2) { - if (IsNonCDLessDevice(ad)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Device is Non-CDLess hence won't have ISO !!"); - status = -EINVAL; - } else if (copy_sect_strut.numOfBytes == 0) { - status = BcmCopyISO(ad, copy_sect_strut); - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Partial Copy of ISO section is not Allowed.."); - status = STATUS_FAILURE; - } - up(&ad->NVMRdmWrmLock); - return status; - } - - status = BcmCopySection(ad, copy_sect_strut.SrcSection, - copy_sect_strut.DstSection, - copy_sect_strut.offset, - copy_sect_strut.numOfBytes); - up(&ad->NVMRdmWrmLock); - return status; -} - -static int bcm_char_ioctl_get_flash_cs_info(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_ioctl_buffer io_buff; - INT status = STATUS_SUCCESS; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - " IOCTL_BCM_GET_FLASH_CS_INFO Called"); - - status = copy_from_user(&io_buff, argp, - sizeof(struct bcm_ioctl_buffer)); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy of IOCTL BUFFER failed"); - return -EFAULT; - } - - if (ad->eNVMType != NVM_FLASH) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Connected device does not have flash"); - return -EINVAL; - } - - if (IsFlash2x(ad) == TRUE) { - if (io_buff.OutputLength < sizeof(struct bcm_flash2x_cs_info)) - return -EINVAL; - - if (copy_to_user(io_buff.OutputBuffer, - ad->psFlash2xCSInfo, - sizeof(struct bcm_flash2x_cs_info))) - return -EFAULT; - } else { - if (io_buff.OutputLength < sizeof(struct bcm_flash_cs_info)) - return -EINVAL; - - if (copy_to_user(io_buff.OutputBuffer, ad->psFlashCSInfo, - sizeof(struct bcm_flash_cs_info))) - return -EFAULT; - } - return status; -} - -static int bcm_char_ioctl_select_dsd(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_ioctl_buffer io_buff; - INT status = STATUS_FAILURE; - UINT sect_offset = 0; - enum bcm_flash2x_section_val flash_2x_section_val; - - flash_2x_section_val = NO_SECTION_VAL; - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_SELECT_DSD Called"); - - if (IsFlash2x(ad) != TRUE) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Flash Does not have 2.x map"); - return -EINVAL; - } - - status = copy_from_user(&io_buff, argp, - sizeof(struct bcm_ioctl_buffer)); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy of IOCTL BUFFER failed"); - return -EFAULT; - } - status = copy_from_user(&flash_2x_section_val, io_buff.InputBuffer, - sizeof(INT)); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy of flash section val failed"); - return -EFAULT; - } - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Read Section :%d", flash_2x_section_val); - if ((flash_2x_section_val != DSD0) && - (flash_2x_section_val != DSD1) && - (flash_2x_section_val != DSD2)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Passed section<%x> is not DSD section", - flash_2x_section_val); - return STATUS_FAILURE; - } - - sect_offset = BcmGetSectionValStartOffset(ad, flash_2x_section_val); - if (sect_offset == INVALID_OFFSET) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Provided Section val <%d> does not exist in Flash 2.x", - flash_2x_section_val); - return -EINVAL; - } - - ad->bAllDSDWriteAllow = TRUE; - ad->ulFlashCalStart = sect_offset; - ad->eActiveDSD = flash_2x_section_val; - - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_nvm_raw_read(void __user *argp, - struct 
bcm_mini_adapter *ad) -{ - struct bcm_nvm_readwrite nvm_read; - struct bcm_ioctl_buffer io_buff; - unsigned int nob; - INT buff_size; - INT read_offset = 0; - UINT read_bytes = 0; - PUCHAR read_buff; - void __user *OutPutBuff; - INT status = STATUS_FAILURE; - - if (ad->eNVMType != NVM_FLASH) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "NVM TYPE is not Flash"); - return -EINVAL; - } - - /* Copy Ioctl Buffer structure */ - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "copy_from_user 1 failed\n"); - return -EFAULT; - } - - if (copy_from_user(&nvm_read, io_buff.OutputBuffer, - sizeof(struct bcm_nvm_readwrite))) - return -EFAULT; - - nob = nvm_read.uiNumBytes; - /* In Raw-Read max Buff size : 64MB */ - - if (nob > DEFAULT_BUFF_SIZE) - buff_size = DEFAULT_BUFF_SIZE; - else - buff_size = nob; - - read_offset = nvm_read.uiOffset; - OutPutBuff = nvm_read.pBuffer; - - read_buff = kzalloc(buff_size , GFP_KERNEL); - if (read_buff == NULL) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Memory allocation failed for Flash 2.x Read Structure"); - return -ENOMEM; - } - down(&ad->NVMRdmWrmLock); - - if ((ad->IdleMode == TRUE) || - (ad->bShutStatus == TRUE) || - (ad->bPreparingForLowPowerMode == TRUE)) { - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Device is in Idle/Shutdown Mode\n"); - kfree(read_buff); - up(&ad->NVMRdmWrmLock); - return -EACCES; - } - - ad->bFlashRawRead = TRUE; - - while (nob) { - if (nob > DEFAULT_BUFF_SIZE) - read_bytes = DEFAULT_BUFF_SIZE; - else - read_bytes = nob; - - /* Reading the data from Flash 2.x */ - status = BeceemNVMRead(ad, (PUINT)read_buff, - read_offset, read_bytes); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Flash 2x read err with status :%d", - status); - break; - } - - BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS, OSAL_DBG, - DBG_LVL_ALL, read_buff, read_bytes); - - status = copy_to_user(OutPutBuff, read_buff, read_bytes); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, - "Copy to use failed with status :%d", - status); - up(&ad->NVMRdmWrmLock); - kfree(read_buff); - return -EFAULT; - } - nob = nob - read_bytes; - if (nob) { - read_offset = read_offset + read_bytes; - OutPutBuff = OutPutBuff + read_bytes; - } - } - ad->bFlashRawRead = false; - up(&ad->NVMRdmWrmLock); - kfree(read_buff); - return status; -} - -static int bcm_char_ioctl_cntrlmsg_mask(void __user *argp, - struct bcm_mini_adapter *ad, - struct bcm_tarang_data *tarang) -{ - struct bcm_ioctl_buffer io_buff; - INT status = STATUS_FAILURE; - ULONG rx_cntrl_msg_bit_mask = 0; - - /* Copy Ioctl Buffer structure */ - status = copy_from_user(&io_buff, argp, - sizeof(struct bcm_ioctl_buffer)); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "copy of Ioctl buffer is failed from user space"); - return -EFAULT; - } - - if (io_buff.InputLength != sizeof(unsigned long)) - return -EINVAL; - - status = copy_from_user(&rx_cntrl_msg_bit_mask, io_buff.InputBuffer, - io_buff.InputLength); - if (status) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "copy of control bit mask failed from user space"); - return -EFAULT; - } - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "\n Got user defined cntrl msg bit mask :%lx", - rx_cntrl_msg_bit_mask); - tarang->RxCntrlMsgBitMask = rx_cntrl_msg_bit_mask; - - return status; -} - -static int bcm_char_ioctl_get_device_driver_info(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct 
bcm_driver_info dev_info; - struct bcm_ioctl_buffer io_buff; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n"); - - memset(&dev_info, 0, sizeof(dev_info)); - dev_info.MaxRDMBufferSize = BUFFER_4K; - dev_info.u32DSDStartOffset = EEPROM_CALPARAM_START; - dev_info.u32RxAlignmentCorrection = 0; - dev_info.u32NVMType = ad->eNVMType; - dev_info.u32InterfaceType = BCM_USB; - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.OutputLength < sizeof(dev_info)) - return -EINVAL; - - if (copy_to_user(io_buff.OutputBuffer, &dev_info, sizeof(dev_info))) - return -EFAULT; - - return STATUS_SUCCESS; -} - -static int bcm_char_ioctl_time_since_net_entry(void __user *argp, - struct bcm_mini_adapter *ad) -{ - struct bcm_time_elapsed time_elapsed_since_net_entry = {0}; - struct bcm_ioctl_buffer io_buff; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_TIME_SINCE_NET_ENTRY called"); - - if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) - return -EFAULT; - - if (io_buff.OutputLength < sizeof(struct bcm_time_elapsed)) - return -EINVAL; - - time_elapsed_since_net_entry.ul64TimeElapsedSinceNetEntry = - get_seconds() - ad->liTimeSinceLastNetEntry; - - if (copy_to_user(io_buff.OutputBuffer, &time_elapsed_since_net_entry, - sizeof(struct bcm_time_elapsed))) - return -EFAULT; - - return STATUS_SUCCESS; -} - - -static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) -{ - struct bcm_tarang_data *tarang = filp->private_data; - void __user *argp = (void __user *)arg; - struct bcm_mini_adapter *ad = tarang->Adapter; - INT status = STATUS_FAILURE; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", - cmd, arg); - - if (_IOC_TYPE(cmd) != BCM_IOCTL) - return -EFAULT; - if (_IOC_DIR(cmd) & _IOC_READ) - status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd)); - else if (_IOC_DIR(cmd) & _IOC_WRITE) - status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd)); - else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE)) - status = STATUS_SUCCESS; - - if (status) - return -EFAULT; - - if (ad->device_removed) - return -EFAULT; - - if (false == ad->fw_download_done) { - switch (cmd) { - case IOCTL_MAC_ADDR_REQ: - case IOCTL_LINK_REQ: - case IOCTL_CM_REQUEST: - case IOCTL_SS_INFO_REQ: - case IOCTL_SEND_CONTROL_MESSAGE: - case IOCTL_IDLE_REQ: - case IOCTL_BCM_GPIO_SET_REQUEST: - case IOCTL_BCM_GPIO_STATUS_REQUEST: - return -EACCES; - default: - break; - } - } - - status = vendorextnIoctl(ad, cmd, arg); - if (status != CONTINUE_COMMON_PATH) - return status; - - switch (cmd) { - /* Rdms for Swin Idle... 
*/ - case IOCTL_BCM_REGISTER_READ_PRIVATE: - status = bcm_char_ioctl_reg_read_private(argp, ad); - return status; - - case IOCTL_BCM_REGISTER_WRITE_PRIVATE: - status = bcm_char_ioctl_reg_write_private(argp, ad); - return status; - - case IOCTL_BCM_REGISTER_READ: - case IOCTL_BCM_EEPROM_REGISTER_READ: - status = bcm_char_ioctl_eeprom_reg_read(argp, ad); - return status; - - case IOCTL_BCM_REGISTER_WRITE: - case IOCTL_BCM_EEPROM_REGISTER_WRITE: - status = bcm_char_ioctl_eeprom_reg_write(argp, ad, cmd); - return status; - - case IOCTL_BCM_GPIO_SET_REQUEST: - status = bcm_char_ioctl_gpio_set_request(argp, ad); - return status; - - case BCM_LED_THREAD_STATE_CHANGE_REQ: - status = bcm_char_ioctl_led_thread_state_change_req(argp, - ad); - return status; - - case IOCTL_BCM_GPIO_STATUS_REQUEST: - status = bcm_char_ioctl_gpio_status_request(argp, ad); - return status; - - case IOCTL_BCM_GPIO_MULTI_REQUEST: - status = bcm_char_ioctl_gpio_multi_request(argp, ad); - return status; - - case IOCTL_BCM_GPIO_MODE_REQUEST: - status = bcm_char_ioctl_gpio_mode_request(argp, ad); - return status; - - case IOCTL_MAC_ADDR_REQ: - case IOCTL_LINK_REQ: - case IOCTL_CM_REQUEST: - case IOCTL_SS_INFO_REQ: - case IOCTL_SEND_CONTROL_MESSAGE: - case IOCTL_IDLE_REQ: - status = bcm_char_ioctl_misc_request(argp, ad); - return status; - - case IOCTL_BCM_BUFFER_DOWNLOAD_START: - status = bcm_char_ioctl_buffer_download_start(ad); - return status; - - case IOCTL_BCM_BUFFER_DOWNLOAD: - status = bcm_char_ioctl_buffer_download(argp, ad); - return status; - - case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: - status = bcm_char_ioctl_buffer_download_stop(argp, ad); - return status; - - - case IOCTL_BE_BUCKET_SIZE: - status = 0; - if (get_user(ad->BEBucketSize, - (unsigned long __user *)arg)) - status = -EFAULT; - break; - - case IOCTL_RTPS_BUCKET_SIZE: - status = 0; - if (get_user(ad->rtPSBucketSize, - (unsigned long __user *)arg)) - status = -EFAULT; - break; - - case IOCTL_CHIP_RESET: - status = bcm_char_ioctl_chip_reset(ad); - return status; - - case IOCTL_QOS_THRESHOLD: - status = bcm_char_ioctl_qos_threshold(arg, ad); - return status; - - case IOCTL_DUMP_PACKET_INFO: - DumpPackInfo(ad); - DumpPhsRules(&ad->stBCMPhsContext); - status = STATUS_SUCCESS; - break; - - case IOCTL_GET_PACK_INFO: - if (copy_to_user(argp, &ad->PackInfo, - sizeof(struct bcm_packet_info)*NO_OF_QUEUES)) - return -EFAULT; - status = STATUS_SUCCESS; - break; - - case IOCTL_BCM_SWITCH_TRANSFER_MODE: - status = bcm_char_ioctl_switch_transfer_mode(argp, ad); - return status; - - case IOCTL_BCM_GET_DRIVER_VERSION: - status = bcm_char_ioctl_get_driver_version(argp); - return status; - - case IOCTL_BCM_GET_CURRENT_STATUS: - status = bcm_char_ioctl_get_current_status(argp, ad); - return status; - - case IOCTL_BCM_SET_MAC_TRACING: - status = bcm_char_ioctl_set_mac_tracing(argp, ad); - return status; - - case IOCTL_BCM_GET_DSX_INDICATION: - status = bcm_char_ioctl_get_dsx_indication(argp, ad); - return status; - - case IOCTL_BCM_GET_HOST_MIBS: - status = bcm_char_ioctl_get_host_mibs(argp, ad, tarang); - return status; - - case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE: - if ((false == ad->bTriedToWakeUpFromlowPowerMode) && - (TRUE == ad->IdleMode)) { - ad->usIdleModePattern = ABORT_IDLE_MODE; - ad->bWakeUpDevice = TRUE; - wake_up(&ad->process_rx_cntrlpkt); - } - - status = STATUS_SUCCESS; - break; - - case IOCTL_BCM_BULK_WRM: - status = bcm_char_ioctl_bulk_wrm(argp, ad, cmd); - return status; - - case IOCTL_BCM_GET_NVM_SIZE: - status = bcm_char_ioctl_get_nvm_size(argp, ad); - return 
status; - - case IOCTL_BCM_CAL_INIT: - status = bcm_char_ioctl_cal_init(argp, ad); - return status; - - case IOCTL_BCM_SET_DEBUG: - status = bcm_char_ioctl_set_debug(argp, ad); - return status; - - case IOCTL_BCM_NVM_READ: - case IOCTL_BCM_NVM_WRITE: - status = bcm_char_ioctl_nvm_rw(argp, ad, cmd); - return status; - - case IOCTL_BCM_FLASH2X_SECTION_READ: - status = bcm_char_ioctl_flash2x_section_read(argp, ad); - return status; - - case IOCTL_BCM_FLASH2X_SECTION_WRITE: - status = bcm_char_ioctl_flash2x_section_write(argp, ad); - return status; - - case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP: - status = bcm_char_ioctl_flash2x_section_bitmap(argp, ad); - return status; - - case IOCTL_BCM_SET_ACTIVE_SECTION: - status = bcm_char_ioctl_set_active_section(argp, ad); - return status; - - case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION: - /* Right Now we are taking care of only DSD */ - ad->bAllDSDWriteAllow = false; - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called"); - status = STATUS_SUCCESS; - break; - - case IOCTL_BCM_COPY_SECTION: - status = bcm_char_ioctl_copy_section(argp, ad); - return status; - - case IOCTL_BCM_GET_FLASH_CS_INFO: - status = bcm_char_ioctl_get_flash_cs_info(argp, ad); - return status; - - case IOCTL_BCM_SELECT_DSD: - status = bcm_char_ioctl_select_dsd(argp, ad); - return status; - - case IOCTL_BCM_NVM_RAW_READ: - status = bcm_char_ioctl_nvm_raw_read(argp, ad); - return status; - - case IOCTL_BCM_CNTRLMSG_MASK: - status = bcm_char_ioctl_cntrlmsg_mask(argp, ad, tarang); - return status; - - case IOCTL_BCM_GET_DEVICE_DRIVER_INFO: - status = bcm_char_ioctl_get_device_driver_info(argp, ad); - return status; - - case IOCTL_BCM_TIME_SINCE_NET_ENTRY: - status = bcm_char_ioctl_time_since_net_entry(argp, ad); - return status; - - case IOCTL_CLOSE_NOTIFICATION: - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, - "IOCTL_CLOSE_NOTIFICATION"); - break; - - default: - pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd); - status = STATUS_FAILURE; - break; - } - return status; -} - - -static const struct file_operations bcm_fops = { - .owner = THIS_MODULE, - .open = bcm_char_open, - .release = bcm_char_release, - .read = bcm_char_read, - .unlocked_ioctl = bcm_char_ioctl, - .llseek = no_llseek, -}; - -int register_control_device_interface(struct bcm_mini_adapter *ad) -{ - - if (ad->major > 0) - return ad->major; - - ad->major = register_chrdev(0, DEV_NAME, &bcm_fops); - if (ad->major < 0) { - pr_err(DRV_NAME ": could not created character device\n"); - return ad->major; - } - - ad->pstCreatedClassDevice = device_create(bcm_class, NULL, - MKDEV(ad->major, 0), - ad, DEV_NAME); - - if (IS_ERR(ad->pstCreatedClassDevice)) { - pr_err(DRV_NAME ": class device create failed\n"); - unregister_chrdev(ad->major, DEV_NAME); - return PTR_ERR(ad->pstCreatedClassDevice); - } - - return 0; -} - -void unregister_control_device_interface(struct bcm_mini_adapter *ad) -{ - if (ad->major > 0) { - device_destroy(bcm_class, MKDEV(ad->major, 0)); - unregister_chrdev(ad->major, DEV_NAME); - } -} - diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c deleted file mode 100644 index e57767684cee..000000000000 --- a/drivers/staging/bcm/Bcmnet.c +++ /dev/null @@ -1,240 +0,0 @@ -#include "headers.h" - -struct net_device *gblpnetdev; - -static INT bcm_open(struct net_device *dev) -{ - struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev); - - if (ad->fw_download_done == false) { - pr_notice(PFX "%s: link up failed (download in progress)\n", - 
dev->name); - return -EBUSY; - } - - if (netif_msg_ifup(ad)) - pr_info(PFX "%s: enabling interface\n", dev->name); - - if (ad->LinkUpStatus) { - if (netif_msg_link(ad)) - pr_info(PFX "%s: link up\n", dev->name); - - netif_carrier_on(ad->dev); - netif_start_queue(ad->dev); - } - - return 0; -} - -static INT bcm_close(struct net_device *dev) -{ - struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev); - - if (netif_msg_ifdown(ad)) - pr_info(PFX "%s: disabling interface\n", dev->name); - - netif_carrier_off(dev); - netif_stop_queue(dev); - - return 0; -} - -static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) -{ - return ClassifyPacket(netdev_priv(dev), skb); -} - -/******************************************************************* -* Function - bcm_transmit() -* -* Description - This is the main transmit function for our virtual -* interface(eth0). It handles the ARP packets. It -* clones this packet and then Queue it to a suitable -* Queue. Then calls the transmit_packet(). -* -* Parameter - skb - Pointer to the socket buffer structure -* dev - Pointer to the virtual net device structure -* -*********************************************************************/ - -static netdev_tx_t bcm_transmit(struct sk_buff *skb, struct net_device *dev) -{ - struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev); - u16 qindex = skb_get_queue_mapping(skb); - - - if (ad->device_removed || !ad->LinkUpStatus) - goto drop; - - if (ad->TransferMode != IP_PACKET_ONLY_MODE) - goto drop; - - if (INVALID_QUEUE_INDEX == qindex) - goto drop; - - if (ad->PackInfo[qindex].uiCurrentPacketsOnHost >= - SF_MAX_ALLOWED_PACKETS_TO_BACKUP) - return NETDEV_TX_BUSY; - - /* Now Enqueue the packet */ - if (netif_msg_tx_queued(ad)) - pr_info(PFX "%s: enqueueing packet to queue %d\n", - dev->name, qindex); - - spin_lock(&ad->PackInfo[qindex].SFQueueLock); - ad->PackInfo[qindex].uiCurrentBytesOnHost += skb->len; - ad->PackInfo[qindex].uiCurrentPacketsOnHost++; - - *((B_UINT32 *) skb->cb + SKB_CB_LATENCY_OFFSET) = jiffies; - ENQUEUEPACKET(ad->PackInfo[qindex].FirstTxQueue, - ad->PackInfo[qindex].LastTxQueue, skb); - atomic_inc(&ad->TotalPacketCount); - spin_unlock(&ad->PackInfo[qindex].SFQueueLock); - - /* FIXME - this is racy and incorrect, replace with work queue */ - if (!atomic_read(&ad->TxPktAvail)) { - atomic_set(&ad->TxPktAvail, 1); - wake_up(&ad->tx_packet_wait_queue); - } - return NETDEV_TX_OK; - - drop: - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - - - -/** -@ingroup init_functions -Register other driver entry points with the kernel -*/ -static const struct net_device_ops bcmNetDevOps = { - .ndo_open = bcm_open, - .ndo_stop = bcm_close, - .ndo_start_xmit = bcm_transmit, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_select_queue = bcm_select_queue, -}; - -static struct device_type wimax_type = { - .name = "wimax", -}; - -static int bcm_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - cmd->supported = 0; - cmd->advertising = 0; - cmd->speed = SPEED_10000; - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_TP; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} - -static void bcm_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev); - struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter; 
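/* What follows fills the ethtool drvinfo from the adapter: the driver name and version strings, a firmware version string built from the flash layout major/minor numbers, and the USB bus path obtained via usb_make_path() on the underlying usb_device. */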
- struct usb_device *udev = interface_to_usbdev(intf_ad->interface); - - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u", - ad->uiFlashLayoutMajorVersion, - ad->uiFlashLayoutMinorVersion); - - usb_make_path(udev, info->bus_info, sizeof(info->bus_info)); -} - -static u32 bcm_get_link(struct net_device *dev) -{ - struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev); - - return ad->LinkUpStatus; -} - -static u32 bcm_get_msglevel(struct net_device *dev) -{ - struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev); - - return ad->msg_enable; -} - -static void bcm_set_msglevel(struct net_device *dev, u32 level) -{ - struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev); - - ad->msg_enable = level; -} - -static const struct ethtool_ops bcm_ethtool_ops = { - .get_settings = bcm_get_settings, - .get_drvinfo = bcm_get_drvinfo, - .get_link = bcm_get_link, - .get_msglevel = bcm_get_msglevel, - .set_msglevel = bcm_set_msglevel, -}; - -int register_networkdev(struct bcm_mini_adapter *ad) -{ - struct net_device *net = ad->dev; - struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter; - struct usb_interface *udev = intf_ad->interface; - struct usb_device *xdev = intf_ad->udev; - - int result; - - net->netdev_ops = &bcmNetDevOps; - net->ethtool_ops = &bcm_ethtool_ops; - net->mtu = MTU_SIZE; /* 1400 Bytes */ - net->tx_queue_len = TX_QLEN; - net->flags |= IFF_NOARP; - - netif_carrier_off(net); - - SET_NETDEV_DEVTYPE(net, &wimax_type); - - /* Read the MAC Address from EEPROM */ - result = ReadMacAddressFromNVM(ad); - if (result != STATUS_SUCCESS) { - dev_err(&udev->dev, - PFX "Error in Reading the mac Address: %d", result); - return -EIO; - } - - result = register_netdev(net); - if (result) - return result; - - gblpnetdev = ad->dev; - - if (netif_msg_probe(ad)) - dev_info(&udev->dev, PFX "%s: register usb-%s-%s %pM\n", - net->name, xdev->bus->bus_name, xdev->devpath, - net->dev_addr); - - return 0; -} - -void unregister_networkdev(struct bcm_mini_adapter *ad) -{ - struct net_device *net = ad->dev; - struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter; - struct usb_interface *udev = intf_ad->interface; - struct usb_device *xdev = intf_ad->udev; - - if (netif_msg_probe(ad)) - dev_info(&udev->dev, PFX "%s: unregister usb-%s%s\n", - net->name, xdev->bus->bus_name, xdev->devpath); - - unregister_netdev(ad->dev); -} diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c deleted file mode 100644 index adca0ce4d05f..000000000000 --- a/drivers/staging/bcm/CmHost.c +++ /dev/null @@ -1,2254 +0,0 @@ -/************************************************************ - * CMHOST.C - * This file contains the routines for handling Connection - * Management. 
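 * Covers service-flow and classifier table lookups, per-SF classifier and PHS rule handling, and parsing of DSA/DSC indications received from the base station.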
- ************************************************************/ - -#include "headers.h" - -enum E_CLASSIFIER_ACTION { - eInvalidClassifierAction, - eAddClassifier, - eReplaceClassifier, - eDeleteClassifier -}; - -static ULONG GetNextTargetBufferLocation(struct bcm_mini_adapter *Adapter, - B_UINT16 tid); -static void restore_endianess_of_pstClassifierEntry( - struct bcm_classifier_rule *pstClassifierEntry, - enum bcm_ipaddr_context eIpAddrContext); - -static void apply_phs_rule_to_all_classifiers( - register struct bcm_mini_adapter *Adapter, - register UINT uiSearchRuleIndex, - USHORT uVCID, - struct bcm_phs_rule *sPhsRule, - struct bcm_phs_rules *cPhsRule, - struct bcm_add_indication_alt *pstAddIndication); - -/************************************************************ - * Function - SearchSfid - * - * Description - This routinue would search QOS queues having - * specified SFID as input parameter. - * - * Parameters - Adapter: Pointer to the Adapter structure - * uiSfid : Given SFID for matching - * - * Returns - Queue index for this SFID(If matched) - * Else Invalid Queue Index(If Not matched) - ************************************************************/ -int SearchSfid(struct bcm_mini_adapter *Adapter, UINT uiSfid) -{ - int i; - - for (i = (NO_OF_QUEUES-1); i >= 0; i--) - if (Adapter->PackInfo[i].ulSFID == uiSfid) - return i; - - return NO_OF_QUEUES+1; -} - -/*************************************************************** - * Function -SearchFreeSfid - * - * Description - This routinue would search Free available SFID. - * - * Parameter - Adapter: Pointer to the Adapter structure - * - * Returns - Queue index for the free SFID - * Else returns Invalid Index. - ****************************************************************/ -static int SearchFreeSfid(struct bcm_mini_adapter *Adapter) -{ - int i; - - for (i = 0; i < (NO_OF_QUEUES-1); i++) - if (Adapter->PackInfo[i].ulSFID == 0) - return i; - - return NO_OF_QUEUES+1; -} - -/* - * Function: SearchClsid - * Description: This routinue would search Classifier having specified ClassifierID as input parameter - * Input parameters: struct bcm_mini_adapter *Adapter - Adapter Context - * unsigned int uiSfid - The SF in which the classifier is to searched - * B_UINT16 uiClassifierID - The classifier ID to be searched - * Return: int :Classifier table index of matching entry - */ -static int SearchClsid(struct bcm_mini_adapter *Adapter, - ULONG ulSFID, - B_UINT16 uiClassifierID) -{ - int i; - - for (i = 0; i < MAX_CLASSIFIERS; i++) { - if ((Adapter->astClassifierTable[i].bUsed) && - (Adapter->astClassifierTable[i].uiClassifierRuleIndex - == uiClassifierID) && - (Adapter->astClassifierTable[i].ulSFID == ulSFID)) - return i; - } - - return MAX_CLASSIFIERS+1; -} - -/* - * @ingroup ctrl_pkt_functions - * This routinue would search Free available Classifier entry in classifier table. 
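 * (A linear scan of astClassifierTable: the first entry with bUsed clear is returned; a value above MAX_CLASSIFIERS means the table is full.)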
- * @return free Classifier Entry index in classifier table for specified SF - */ -static int SearchFreeClsid(struct bcm_mini_adapter *Adapter /**Adapter Context*/) -{ - int i; - - for (i = 0; i < MAX_CLASSIFIERS; i++) { - if (!Adapter->astClassifierTable[i].bUsed) - return i; - } - - return MAX_CLASSIFIERS+1; -} - -static VOID deleteSFBySfid(struct bcm_mini_adapter *Adapter, - UINT uiSearchRuleIndex) -{ - /* deleting all the packet held in the SF */ - flush_queue(Adapter, uiSearchRuleIndex); - - /* Deleting the all classifiers for this SF */ - DeleteAllClassifiersForSF(Adapter, uiSearchRuleIndex); - - /* Resetting only MIBS related entries in the SF */ - memset((PVOID)&Adapter->PackInfo[uiSearchRuleIndex], 0, - sizeof(struct bcm_mibs_table)); -} - -static inline VOID -CopyIpAddrToClassifier(struct bcm_classifier_rule *pstClassifierEntry, - B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc, - bool bIpVersion6, enum bcm_ipaddr_context eIpAddrContext) -{ - int i = 0; - UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS; - UCHAR *ptrClassifierIpAddress = NULL; - UCHAR *ptrClassifierIpMask = NULL; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (bIpVersion6) - nSizeOfIPAddressInBytes = IPV6_ADDRESS_SIZEINBYTES; - - /* Destination Ip Address */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Ip Address Range Length:0x%X ", u8IpAddressLen); - if ((bIpVersion6 ? (IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2) : - (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen) { - - union u_ip_address *st_dest_ip = - &pstClassifierEntry->stDestIpAddress; - - union u_ip_address *st_src_ip = - &pstClassifierEntry->stSrcIpAddress; - - /* - * checking both the mask and address togethor in Classification. - * So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2 - * (nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask) - */ - if (eIpAddrContext == eDestIpAddress) { - pstClassifierEntry->ucIPDestinationAddressLength = - u8IpAddressLen/(nSizeOfIPAddressInBytes * 2); - if (bIpVersion6) { - ptrClassifierIpAddress = - st_dest_ip->ucIpv6Address; - ptrClassifierIpMask = - st_dest_ip->ucIpv6Mask; - } else { - ptrClassifierIpAddress = - st_dest_ip->ucIpv4Address; - ptrClassifierIpMask = - st_dest_ip->ucIpv4Mask; - } - } else if (eIpAddrContext == eSrcIpAddress) { - pstClassifierEntry->ucIPSourceAddressLength = - u8IpAddressLen/(nSizeOfIPAddressInBytes * 2); - if (bIpVersion6) { - ptrClassifierIpAddress = - st_src_ip->ucIpv6Address; - ptrClassifierIpMask = st_src_ip->ucIpv6Mask; - } else { - ptrClassifierIpAddress = - st_src_ip->ucIpv4Address; - ptrClassifierIpMask = st_src_ip->ucIpv4Mask; - } - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Address Length:0x%X\n", - pstClassifierEntry->ucIPDestinationAddressLength); - while ((u8IpAddressLen >= nSizeOfIPAddressInBytes) - && (i < MAX_IP_RANGE_LENGTH)) { - memcpy(ptrClassifierIpAddress + - (i * nSizeOfIPAddressInBytes), - (pu8IpAddressMaskSrc - + (i * nSizeOfIPAddressInBytes * 2)), - nSizeOfIPAddressInBytes); - - if (!bIpVersion6) { - if (eIpAddrContext == eSrcIpAddress) { - st_src_ip->ulIpv4Addr[i] = - ntohl(st_src_ip->ulIpv4Addr[i]); - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - CONN_MSG, - DBG_LVL_ALL, - "Src Ip Address:0x%luX ", - st_src_ip->ulIpv4Addr[i]); - } else if (eIpAddrContext == eDestIpAddress) { - st_dest_ip->ulIpv4Addr[i] = - ntohl(st_dest_ip->ulIpv4Addr[i]); - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - CONN_MSG, - DBG_LVL_ALL, - "Dest Ip 
Address:0x%luX ", - st_dest_ip->ulIpv4Addr[i]); - } - } - u8IpAddressLen -= nSizeOfIPAddressInBytes; - if (u8IpAddressLen >= nSizeOfIPAddressInBytes) { - memcpy(ptrClassifierIpMask + - (i * nSizeOfIPAddressInBytes), - (pu8IpAddressMaskSrc - + nSizeOfIPAddressInBytes - + (i * nSizeOfIPAddressInBytes * 2)), - nSizeOfIPAddressInBytes); - - if (!bIpVersion6) { - if (eIpAddrContext == eSrcIpAddress) { - st_src_ip->ulIpv4Mask[i] = - ntohl(st_src_ip->ulIpv4Mask[i]); - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - CONN_MSG, - DBG_LVL_ALL, - "Src Ip Mask Address:0x%luX ", - st_src_ip->ulIpv4Mask[i]); - } else if (eIpAddrContext == eDestIpAddress) { - st_dest_ip->ulIpv4Mask[i] = - ntohl(st_dest_ip->ulIpv4Mask[i]); - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - CONN_MSG, - DBG_LVL_ALL, - "Dest Ip Mask Address:0x%luX ", - st_dest_ip->ulIpv4Mask[i]); - } - } - u8IpAddressLen -= nSizeOfIPAddressInBytes; - } - if (u8IpAddressLen == 0) - pstClassifierEntry->bDestIpValid = TRUE; - - i++; - } - if (bIpVersion6) { - /* Restore EndianNess of Struct */ - restore_endianess_of_pstClassifierEntry( - pstClassifierEntry, - eIpAddrContext - ); - } - } -} - -void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter, B_UINT16 TID, bool bFreeAll) -{ - int i; - struct bcm_targetdsx_buffer *curr_buf; - - for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) { - curr_buf = &Adapter->astTargetDsxBuffer[i]; - - if (curr_buf->valid) - continue; - - if ((bFreeAll) || (curr_buf->tid == TID)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n", - TID, curr_buf->ulTargetDsxBuffer); - curr_buf->valid = 1; - curr_buf->tid = 0; - Adapter->ulFreeTargetBufferCnt++; - } - } -} - -/* - * @ingroup ctrl_pkt_functions - * copy classifier rule into the specified SF index - */ -static inline VOID CopyClassifierRuleToSF(struct bcm_mini_adapter *Adapter, - struct bcm_convergence_types *psfCSType, - UINT uiSearchRuleIndex, - UINT nClassifierIndex) -{ - struct bcm_classifier_rule *pstClassifierEntry = NULL; - /* VOID *pvPhsContext = NULL; */ - int i; - /* UCHAR ucProtocolLength=0; */ - /* ULONG ulPhsStatus; */ - - struct bcm_packet_class_rules *pack_class_rule = - &psfCSType->cCPacketClassificationRule; - - if (Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 || - nClassifierIndex > (MAX_CLASSIFIERS-1)) - return; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Storing Classifier Rule Index : %X", - ntohs(pack_class_rule->u16PacketClassificationRuleIndex)); - - if (nClassifierIndex > MAX_CLASSIFIERS-1) - return; - - pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex]; - if (pstClassifierEntry) { - /* Store if Ipv6 */ - pstClassifierEntry->bIpv6Protocol = - (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? 
TRUE : false; - - /* Destinaiton Port */ - pstClassifierEntry->ucDestPortRangeLength = - pack_class_rule->u8ProtocolDestPortRangeLength / 4; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Destination Port Range Length:0x%X ", - pstClassifierEntry->ucDestPortRangeLength); - - if (pack_class_rule->u8ProtocolDestPortRangeLength <= MAX_PORT_RANGE) { - for (i = 0; i < (pstClassifierEntry->ucDestPortRangeLength); i++) { - pstClassifierEntry->usDestPortRangeLo[i] = - *((PUSHORT)(pack_class_rule->u8ProtocolDestPortRange+i)); - pstClassifierEntry->usDestPortRangeHi[i] = - *((PUSHORT)(pack_class_rule->u8ProtocolDestPortRange+2+i)); - pstClassifierEntry->usDestPortRangeLo[i] = - ntohs(pstClassifierEntry->usDestPortRangeLo[i]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - CONN_MSG, DBG_LVL_ALL, - "Destination Port Range Lo:0x%X ", - pstClassifierEntry->usDestPortRangeLo[i]); - pstClassifierEntry->usDestPortRangeHi[i] = - ntohs(pstClassifierEntry->usDestPortRangeHi[i]); - } - } else { - pstClassifierEntry->ucDestPortRangeLength = 0; - } - - /* Source Port */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Source Port Range Length:0x%X ", - pack_class_rule->u8ProtocolSourcePortRangeLength); - if (pack_class_rule->u8ProtocolSourcePortRangeLength <= MAX_PORT_RANGE) { - pstClassifierEntry->ucSrcPortRangeLength = - pack_class_rule->u8ProtocolSourcePortRangeLength/4; - for (i = 0; i < (pstClassifierEntry->ucSrcPortRangeLength); i++) { - pstClassifierEntry->usSrcPortRangeLo[i] = - *((PUSHORT)(pack_class_rule-> - u8ProtocolSourcePortRange+i)); - pstClassifierEntry->usSrcPortRangeHi[i] = - *((PUSHORT)(pack_class_rule-> - u8ProtocolSourcePortRange+2+i)); - pstClassifierEntry->usSrcPortRangeLo[i] = - ntohs(pstClassifierEntry->usSrcPortRangeLo[i]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - CONN_MSG, DBG_LVL_ALL, - "Source Port Range Lo:0x%X ", - pstClassifierEntry->usSrcPortRangeLo[i]); - pstClassifierEntry->usSrcPortRangeHi[i] = - ntohs(pstClassifierEntry->usSrcPortRangeHi[i]); - } - } - /* Destination Ip Address and Mask */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Ip Destination Parameters : "); - CopyIpAddrToClassifier(pstClassifierEntry, - pack_class_rule->u8IPDestinationAddressLength, - pack_class_rule->u8IPDestinationAddress, - (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? - TRUE : false, eDestIpAddress); - - /* Source Ip Address and Mask */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Ip Source Parameters : "); - - CopyIpAddrToClassifier(pstClassifierEntry, - pack_class_rule->u8IPMaskedSourceAddressLength, - pack_class_rule->u8IPMaskedSourceAddress, - (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? 
TRUE : false, - eSrcIpAddress); - - /* TOS */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "TOS Length:0x%X ", - pack_class_rule->u8IPTypeOfServiceLength); - if (pack_class_rule->u8IPTypeOfServiceLength == 3) { - pstClassifierEntry->ucIPTypeOfServiceLength = - pack_class_rule->u8IPTypeOfServiceLength; - pstClassifierEntry->ucTosLow = - pack_class_rule->u8IPTypeOfService[0]; - pstClassifierEntry->ucTosHigh = - pack_class_rule->u8IPTypeOfService[1]; - pstClassifierEntry->ucTosMask = - pack_class_rule->u8IPTypeOfService[2]; - pstClassifierEntry->bTOSValid = TRUE; - } - if (pack_class_rule->u8Protocol == 0) { - /* we didn't get protocol field filled in by the BS */ - pstClassifierEntry->ucProtocolLength = 0; - } else { - pstClassifierEntry->ucProtocolLength = 1; /* 1 valid protocol */ - } - - pstClassifierEntry->ucProtocol[0] = pack_class_rule->u8Protocol; - pstClassifierEntry->u8ClassifierRulePriority = - pack_class_rule->u8ClassifierRulePriority; - - /* store the classifier rule ID and set this classifier entry as valid */ - pstClassifierEntry->ucDirection = - Adapter->PackInfo[uiSearchRuleIndex].ucDirection; - pstClassifierEntry->uiClassifierRuleIndex = - ntohs(pack_class_rule->u16PacketClassificationRuleIndex); - pstClassifierEntry->usVCID_Value = - Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value; - pstClassifierEntry->ulSFID = - Adapter->PackInfo[uiSearchRuleIndex].ulSFID; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Search Index %d Dir: %d, Index: %d, Vcid: %d\n", - uiSearchRuleIndex, - pstClassifierEntry->ucDirection, - pstClassifierEntry->uiClassifierRuleIndex, - pstClassifierEntry->usVCID_Value); - - if (pack_class_rule->u8AssociatedPHSI) - pstClassifierEntry->u8AssociatedPHSI = - pack_class_rule->u8AssociatedPHSI; - - /* Copy ETH CS Parameters */ - pstClassifierEntry->ucEthCSSrcMACLen = - (pack_class_rule->u8EthernetSourceMACAddressLength); - memcpy(pstClassifierEntry->au8EThCSSrcMAC, - pack_class_rule->u8EthernetSourceMACAddress, - MAC_ADDRESS_SIZE); - memcpy(pstClassifierEntry->au8EThCSSrcMACMask, - pack_class_rule->u8EthernetSourceMACAddress - + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE); - pstClassifierEntry->ucEthCSDestMACLen = - (pack_class_rule->u8EthernetDestMacAddressLength); - memcpy(pstClassifierEntry->au8EThCSDestMAC, - pack_class_rule->u8EthernetDestMacAddress, - MAC_ADDRESS_SIZE); - memcpy(pstClassifierEntry->au8EThCSDestMACMask, - pack_class_rule->u8EthernetDestMacAddress - + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE); - pstClassifierEntry->ucEtherTypeLen = - (pack_class_rule->u8EthertypeLength); - memcpy(pstClassifierEntry->au8EthCSEtherType, - pack_class_rule->u8Ethertype, - NUM_ETHERTYPE_BYTES); - memcpy(pstClassifierEntry->usUserPriority, - &pack_class_rule->u16UserPriority, 2); - pstClassifierEntry->usVLANID = - ntohs(pack_class_rule->u16VLANID); - pstClassifierEntry->usValidityBitMap = - ntohs(pack_class_rule->u16ValidityBitMap); - - pstClassifierEntry->bUsed = TRUE; - } -} - -/* - * @ingroup ctrl_pkt_functions - */ -static inline VOID DeleteClassifierRuleFromSF(struct bcm_mini_adapter *Adapter, - UINT uiSearchRuleIndex, UINT nClassifierIndex) -{ - struct bcm_classifier_rule *pstClassifierEntry = NULL; - B_UINT16 u16PacketClassificationRuleIndex; - USHORT usVCID; - /* VOID *pvPhsContext = NULL; */ - /*ULONG ulPhsStatus; */ - - usVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value; - - if (nClassifierIndex > MAX_CLASSIFIERS-1) - return; - - if (usVCID == 0) - return; - - u16PacketClassificationRuleIndex = - 
Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex; - pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex]; - if (pstClassifierEntry) { - pstClassifierEntry->bUsed = false; - pstClassifierEntry->uiClassifierRuleIndex = 0; - memset(pstClassifierEntry, 0, - sizeof(struct bcm_classifier_rule)); - - /* Delete the PHS Rule for this classifier */ - PhsDeleteClassifierRule(&Adapter->stBCMPhsContext, usVCID, - u16PacketClassificationRuleIndex); - } -} - -/* - * @ingroup ctrl_pkt_functions - */ -VOID DeleteAllClassifiersForSF(struct bcm_mini_adapter *Adapter, - UINT uiSearchRuleIndex) -{ - struct bcm_classifier_rule *pstClassifierEntry = NULL; - int i; - /* B_UINT16 u16PacketClassificationRuleIndex; */ - USHORT ulVCID; - /* VOID *pvPhsContext = NULL; */ - /* ULONG ulPhsStatus; */ - - ulVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value; - - if (ulVCID == 0) - return; - - for (i = 0; i < MAX_CLASSIFIERS; i++) { - if (Adapter->astClassifierTable[i].usVCID_Value == ulVCID) { - pstClassifierEntry = &Adapter->astClassifierTable[i]; - - if (pstClassifierEntry->bUsed) - DeleteClassifierRuleFromSF(Adapter, - uiSearchRuleIndex, i); - } - } - - /* Delete All Phs Rules Associated with this SF */ - PhsDeleteSFRules(&Adapter->stBCMPhsContext, ulVCID); -} - -/* - * This routinue copies the Connection Management - * related data into the Adapter structure. - * @ingroup ctrl_pkt_functions - */ -static VOID CopyToAdapter(register struct bcm_mini_adapter *Adapter, /* PackInfo[uiSearchRuleIndex]; - USHORT uVCID = curr_packinfo->usVCID_Value; - UINT UGIValue = 0; - - curr_packinfo->bValid = TRUE; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Search Rule Index = %d\n", uiSearchRuleIndex); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "%s: SFID= %x ", __func__, ntohl(psfLocalSet->u32SFID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Updating Queue %d", uiSearchRuleIndex); - - ulSFID = ntohl(psfLocalSet->u32SFID); - /* Store IP Version used */ - /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */ - - curr_packinfo->bIPCSSupport = 0; - curr_packinfo->bEthCSSupport = 0; - - /* Enable IP/ETh CS Support As Required */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "CopyToAdapter : u8CSSpecification : %X\n", - psfLocalSet->u8CSSpecification); - switch (psfLocalSet->u8CSSpecification) { - case eCSPacketIPV4: - curr_packinfo->bIPCSSupport = IPV4_CS; - break; - case eCSPacketIPV6: - curr_packinfo->bIPCSSupport = IPV6_CS; - break; - case eCS802_3PacketEthernet: - case eCS802_1QPacketVLAN: - curr_packinfo->bEthCSSupport = ETH_CS_802_3; - break; - case eCSPacketIPV4Over802_1QVLAN: - case eCSPacketIPV4Over802_3Ethernet: - curr_packinfo->bIPCSSupport = IPV4_CS; - curr_packinfo->bEthCSSupport = ETH_CS_802_3; - break; - case eCSPacketIPV6Over802_1QVLAN: - case eCSPacketIPV6Over802_3Ethernet: - curr_packinfo->bIPCSSupport = IPV6_CS; - curr_packinfo->bEthCSSupport = ETH_CS_802_3; - break; - default: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Error in value of CS Classification.. 
setting default to IP CS\n"); - curr_packinfo->bIPCSSupport = IPV4_CS; - break; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X\n", - uiSearchRuleIndex, - curr_packinfo->bEthCSSupport, - curr_packinfo->bIPCSSupport); - - /* Store IP Version used */ - /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */ - if (curr_packinfo->bIPCSSupport == IPV6_CS) - curr_packinfo->ucIpVersion = IPV6; - else - curr_packinfo->ucIpVersion = IPV4; - - /* To ensure that the ETH CS code doesn't gets executed if the BS doesn't supports ETH CS */ - if (!Adapter->bETHCSEnabled) - curr_packinfo->bEthCSSupport = 0; - - if (psfLocalSet->u8ServiceClassNameLength > 0 && psfLocalSet->u8ServiceClassNameLength < 32) - memcpy(curr_packinfo->ucServiceClassName, - psfLocalSet->u8ServiceClassName, - psfLocalSet->u8ServiceClassNameLength); - - curr_packinfo->u8QueueType = psfLocalSet->u8ServiceFlowSchedulingType; - - if (curr_packinfo->u8QueueType == BE && curr_packinfo->ucDirection) - Adapter->usBestEffortQueueIndex = uiSearchRuleIndex; - - curr_packinfo->ulSFID = ntohl(psfLocalSet->u32SFID); - - curr_packinfo->u8TrafficPriority = psfLocalSet->u8TrafficPriority; - - /* copy all the classifier in the Service Flow param structure */ - for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Classifier index =%d", i); - psfCSType = &psfLocalSet->cConvergenceSLTypes[i]; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Classifier index =%d", i); - - if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority) - curr_packinfo->bClassifierPriority = TRUE; - - if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority) - curr_packinfo->bClassifierPriority = TRUE; - - if (ucDsxType == DSA_ACK) { - eClassifierAction = eAddClassifier; - } else if (ucDsxType == DSC_ACK) { - switch (psfCSType->u8ClassfierDSCAction) { - case 0: /* DSC Add Classifier */ - eClassifierAction = eAddClassifier; - break; - case 1: /* DSC Replace Classifier */ - eClassifierAction = eReplaceClassifier; - break; - case 2: /* DSC Delete Classifier */ - eClassifierAction = eDeleteClassifier; - break; - default: - eClassifierAction = eInvalidClassifierAction; - } - } - - u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex); - - switch (eClassifierAction) { - case eAddClassifier: - /* Get a Free Classifier Index From Classifier table for this SF to add the Classifier */ - /* Contained in this message */ - nClassifierIndex = SearchClsid(Adapter, - ulSFID, - u16PacketClassificationRuleIndex); - - if (nClassifierIndex > MAX_CLASSIFIERS) { - nClassifierIndex = SearchFreeClsid(Adapter); - if (nClassifierIndex > MAX_CLASSIFIERS) { - /* Failed To get a free Entry */ - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - CONN_MSG, - DBG_LVL_ALL, - "Error Failed To get a free Classifier Entry"); - break; - } - /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. 
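The slot obtained from SearchFreeClsid() is used only because SearchClsid() found no existing entry with this PCRI for the SFID; a match would have been rejected as a duplicate in the else branch below.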
*/ - CopyClassifierRuleToSF(Adapter, psfCSType, - uiSearchRuleIndex, - nClassifierIndex); - } else { - /* This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - CONN_MSG, - DBG_LVL_ALL, - "CopyToAdapter: Error The Specified Classifier Already Exists and attempted To Add Classifier with Same PCRI : 0x%x\n", - u16PacketClassificationRuleIndex); - } - break; - case eReplaceClassifier: - /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */ - /* with the new classifier Contained in this message */ - nClassifierIndex = SearchClsid(Adapter, ulSFID, - u16PacketClassificationRuleIndex); - if (nClassifierIndex > MAX_CLASSIFIERS) { - /* Failed To search the classifier */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - CONN_MSG, DBG_LVL_ALL, - "Error Search for Classifier To be replaced failed"); - break; - } - /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */ - CopyClassifierRuleToSF(Adapter, psfCSType, - uiSearchRuleIndex, nClassifierIndex); - break; - case eDeleteClassifier: - /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */ - /* with the new classifier Contained in this message */ - nClassifierIndex = SearchClsid(Adapter, ulSFID, - u16PacketClassificationRuleIndex); - if (nClassifierIndex > MAX_CLASSIFIERS) { - /* Failed To search the classifier */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - CONN_MSG, DBG_LVL_ALL, - "Error Search for Classifier To be deleted failed"); - break; - } - - /* Delete This classifier */ - DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, - nClassifierIndex); - break; - default: - /* Invalid Action for classifier */ - break; - } - } - - /* Repeat parsing Classification Entries to process PHS Rules */ - for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) { - psfCSType = &psfLocalSet->cConvergenceSLTypes[i]; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "psfCSType->u8PhsDSCAction : 0x%x\n", - psfCSType->u8PhsDSCAction); - - switch (psfCSType->u8PhsDSCAction) { - case eDeleteAllPHSRules: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, - DBG_LVL_ALL, - "Deleting All PHS Rules For VCID: 0x%X\n", - uVCID); - - /* Delete All the PHS rules for this Service flow */ - PhsDeleteSFRules(&Adapter->stBCMPhsContext, uVCID); - break; - case eDeletePHSRule: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, - DBG_LVL_ALL, - "PHS DSC Action = Delete PHS Rule\n"); - - if (psfCSType->cPhsRule.u8PHSI) - PhsDeletePHSRule(&Adapter->stBCMPhsContext, - uVCID, - psfCSType->cCPacketClassificationRule.u8AssociatedPHSI); - - break; - default: - if (ucDsxType == DSC_ACK) { - /* BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC\n",psfCSType->cPhsRule.u8PHSI)); */ - break; /* FOr DSC ACK Case PHS DSC Action must be in valid set */ - } - /* Proceed To Add PHS rule for DSA_ACK case even if PHS DSC action is unspecified */ - /* No Break Here . Intentionally! */ - - case eAddPHSRule: - case eSetPHSRule: - if (psfCSType->cPhsRule.u8PHSI) { - /* Apply This PHS Rule to all classifiers whose Associated PHSI Match */ - apply_phs_rule_to_all_classifiers(Adapter, - uiSearchRuleIndex, - uVCID, - &sPhsRule, - &psfCSType->cPhsRule, - pstAddIndication); - } - break; - } - } - - if (psfLocalSet->u32MaxSustainedTrafficRate == 0) { - /* No Rate Limit . 
Set Max Sustained Traffic Rate to Maximum */ - curr_packinfo->uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE; - } else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) > WIMAX_MAX_ALLOWED_RATE) { - /* Too large Allowed Rate specified. Limiting to Wi Max Allowed rate */ - curr_packinfo->uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE; - } else { - curr_packinfo->uiMaxAllowedRate = - ntohl(psfLocalSet->u32MaxSustainedTrafficRate); - } - - curr_packinfo->uiMaxLatency = ntohl(psfLocalSet->u32MaximumLatency); - if (curr_packinfo->uiMaxLatency == 0) /* 0 should be treated as infinite */ - curr_packinfo->uiMaxLatency = MAX_LATENCY_ALLOWED; - - if ((curr_packinfo->u8QueueType == ERTPS || - curr_packinfo->u8QueueType == UGS)) - UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval); - - if (UGIValue == 0) - UGIValue = DEFAULT_UG_INTERVAL; - - /* - * For UGI based connections... - * DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at host... - * The extra amount of token is to ensure that a large amount of jitter won't have loss in throughput... - * In case of non-UGI based connection, 200 frames worth of data is the max token count at host... - */ - curr_packinfo->uiMaxBucketSize = - (DEFAULT_UGI_FACTOR*curr_packinfo->uiMaxAllowedRate*UGIValue)/1000; - - if (curr_packinfo->uiMaxBucketSize < WIMAX_MAX_MTU*8) { - UINT UGIFactor = 0; - /* Special Handling to ensure the biggest size of packet can go out from host to FW as follows: - * 1. Any packet from Host to FW can go out in different packet size. - * 2. So in case the Bucket count is smaller than MTU, the packets of size (Size > TokenCount), will get dropped. - * 3. We can allow packets of MaxSize from Host->FW that can go out from FW in multiple SDUs by fragmentation at Wimax Layer - */ - UGIFactor = (curr_packinfo->uiMaxLatency/UGIValue + 1); - - if (UGIFactor > DEFAULT_UGI_FACTOR) - curr_packinfo->uiMaxBucketSize = - (UGIFactor*curr_packinfo->uiMaxAllowedRate*UGIValue)/1000; - - if (curr_packinfo->uiMaxBucketSize > WIMAX_MAX_MTU*8) - curr_packinfo->uiMaxBucketSize = WIMAX_MAX_MTU*8; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "LAT: %d, UGI: %d\n", curr_packinfo->uiMaxLatency, - UGIValue); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x", - curr_packinfo->uiMaxAllowedRate, - ntohl(psfLocalSet->u32MaxSustainedTrafficRate), - curr_packinfo->uiMaxBucketSize); - - /* copy the extended SF Parameters to Support MIBS */ - CopyMIBSExtendedSFParameters(Adapter, psfLocalSet, uiSearchRuleIndex); - - /* store header suppression enabled flag per SF */ - curr_packinfo->bHeaderSuppressionEnabled = - !(psfLocalSet->u8RequesttransmissionPolicy & - MASK_DISABLE_HEADER_SUPPRESSION); - - kfree(curr_packinfo->pstSFIndication); - curr_packinfo->pstSFIndication = pstAddIndication; - - /* Re Sort the SF list in PackInfo according to Traffic Priority */ - SortPackInfo(Adapter); - - /* Re Sort the Classifier Rules table and re - arrange - * according to Classifier Rule Priority - */ - SortClassifiers(Adapter); - DumpPhsRules(&Adapter->stBCMPhsContext); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "%s <=====", __func__); -} - -/*********************************************************************** - * Function - DumpCmControlPacket - * - * Description - This routinue Dumps the Contents of the AddIndication - * Structure in the Connection Management Control Packet - * - * Parameter - pvBuffer: Pointer to 
the buffer containing the - * AddIndication data. - * - * Returns - None - *************************************************************************/ -static VOID DumpCmControlPacket(PVOID pvBuffer) -{ - int uiLoopIndex; - int nIndex; - struct bcm_add_indication_alt *pstAddIndication; - UINT nCurClassifierCnt; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - pstAddIndication = pvBuffer; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", ntohs(pstAddIndication->u16CID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID: 0x%X", ntohs(pstAddIndication->u16VCID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", htonl(pstAddIndication->sfAuthorizedSet.u32SFID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", htons(pstAddIndication->sfAuthorizedSet.u16CID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", - pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X", - pstAddIndication->sfAuthorizedSet.u8ServiceClassName[0], - pstAddIndication->sfAuthorizedSet.u8ServiceClassName[1], - pstAddIndication->sfAuthorizedSet.u8ServiceClassName[2], - pstAddIndication->sfAuthorizedSet.u8ServiceClassName[3], - pstAddIndication->sfAuthorizedSet.u8ServiceClassName[4], - pstAddIndication->sfAuthorizedSet.u8ServiceClassName[5]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%X", pstAddIndication->sfAuthorizedSet.u8MBSService); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%X", pstAddIndication->sfAuthorizedSet.u8QosParamSet); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%X, %p", - pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate: 0x%X 0x%p", - pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate, - &pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X", - pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%X", - pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%X", - pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]); - 
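/* The prints below continue through the rest of the authorized set (scheduling type, latency and jitter limits, ARQ and CS parameters, HARQ channel mapping) and then walk each received classifier rule. */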
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%X", - pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAuthorizedSet.u32ToleratedJitter); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaximumLatency); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X", - pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfAuthorizedSet.u8SDUSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%X", pstAddIndication->sfAuthorizedSet.u16TargetSAID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQEnable); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQWindowSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%X", pstAddIndication->sfAuthorizedSet.u8CSSpecification); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%X", - pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAuthorizedSet.u16TimeBase); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAuthorizedSet.u8PagingPreference); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval: 0x%X", - pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ", - *(unsigned int *)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping, - *(unsigned int 
*)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4], - *(USHORT *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%X", - pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAuthorizedSet.u8TotalClassifiers); - - nCurClassifierCnt = pstAddIndication->sfAuthorizedSet.u8TotalClassifiers; - if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF) - nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead); - if (!pstAddIndication->sfAuthorizedSet.bValid) - pstAddIndication->sfAuthorizedSet.bValid = 1; - for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) { - struct bcm_convergence_types *psfCSType = NULL; - - psfCSType = &pstAddIndication->sfAuthorizedSet.cConvergenceSLTypes[nIndex]; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%X ", - psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%X ", - psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ", - psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0], - psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1], - psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]); - - for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8Protocol); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ", - psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength); - - for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%X ", - psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength); - - for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ", - psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ", - 
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0], - psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1], - psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2], - psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ", - psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0], - psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1], - psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2], - psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: %pM", - psfCSType->cCPacketClassificationRule. - u8EthernetDestMacAddress); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM", - psfCSType->cCPacketClassificationRule. - u8EthernetSourceMACAddress); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8EthertypeLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X ,0x%02X ,0x%02X ", - psfCSType->cCPacketClassificationRule.u8Ethertype[0], - psfCSType->cCPacketClassificationRule.u8Ethertype[1], - psfCSType->cCPacketClassificationRule.u8Ethertype[2]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ", - psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%X ", - psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%X ", - psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]); -#ifdef VERSION_D5 - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ", - psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x%*ph ", - 6, psfCSType->cCPacketClassificationRule. 
- u8IPv6FlowLable); -#endif - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%02X", pstAddIndication->sfAuthorizedSet.bValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfAdmittedSet.u32SFID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfAdmittedSet.u16CID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", - pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, - "u8ServiceClassName: 0x%*ph", - 6, pstAddIndication->sfAdmittedSet.u8ServiceClassName); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfAdmittedSet.u8MBSService); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfAdmittedSet.u8QosParamSet); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfAdmittedSet.u8TrafficPriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X", - pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X", - pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X", - pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X", - pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAdmittedSet.u32ToleratedJitter); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAdmittedSet.u32MaximumLatency); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X", - pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%02X", pstAddIndication->sfAdmittedSet.u8SDUSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%02X", pstAddIndication->sfAdmittedSet.u16TargetSAID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQEnable); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQWindowSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut); - BCM_DEBUG_PRINT(Adapter, 
DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%02X", pstAddIndication->sfAdmittedSet.u8CSSpecification); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%02X", - pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAdmittedSet.u16TimeBase); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAdmittedSet.u8PagingPreference); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%02X", - pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAdmittedSet.u8TotalClassifiers); - - nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers; - if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF) - nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF; - - for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) { - struct bcm_convergence_types *psfCSType = NULL; - - psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex]; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%02X", - psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%*ph", - 3, psfCSType->cCPacketClassificationRule. 
- u8IPTypeOfService); - for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", psfCSType->cCPacketClassificationRule.u8Protocol); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength); - - for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength); - - for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%*ph ", - 4, psfCSType->cCPacketClassificationRule. - u8ProtocolSourcePortRange); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%*ph ", - 4, psfCSType->cCPacketClassificationRule. - u8ProtocolDestPortRange); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: %pM", - psfCSType->cCPacketClassificationRule. - u8EthernetDestMacAddress); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM", - psfCSType->cCPacketClassificationRule. - u8EthernetSourceMACAddress); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8Ethertype[3]: 0x%*ph", - 3, psfCSType->cCPacketClassificationRule. 
- u8Ethertype); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ", - psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%02X", - psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%02X ", - psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]); -#ifdef VERSION_D5 - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ", - psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x%*ph ", - 6, psfCSType->cCPacketClassificationRule. - u8IPv6FlowLable); -#endif - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%X", pstAddIndication->sfAdmittedSet.bValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfActiveSet.u32SFID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfActiveSet.u16CID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", pstAddIndication->sfActiveSet.u8ServiceClassNameLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, - "u8ServiceClassName: 0x%*ph", - 6, pstAddIndication->sfActiveSet.u8ServiceClassName); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfActiveSet.u8MBSService); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfActiveSet.u8QosParamSet); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfActiveSet.u8TrafficPriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfActiveSet.u32MaxTrafficBurst); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X", - pstAddIndication->sfActiveSet.u32MinReservedTrafficRate); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X", - pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X", - pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X", - pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, 
"u32ToleratedJitter: 0x%X", pstAddIndication->sfActiveSet.u32ToleratedJitter); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfActiveSet.u32MaximumLatency); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X", - pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfActiveSet.u8SDUSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID: 0x%X", pstAddIndication->sfActiveSet.u16TargetSAID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable: 0x%X", pstAddIndication->sfActiveSet.u8ARQEnable); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQWindowSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockLifeTime); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfActiveSet.u8ARQDeliverInOrder); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification: 0x%X", pstAddIndication->sfActiveSet.u8CSSpecification); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService: 0x%X", - pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfActiveSet.u16SDUInterArrivalTime); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase: 0x%X", pstAddIndication->sfActiveSet.u16TimeBase); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference: 0x%X", pstAddIndication->sfActiveSet.u8PagingPreference); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference: 0x%X", - pstAddIndication->sfActiveSet.u8TrafficIndicationPreference); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfActiveSet.u8TotalClassifiers); - - nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers; - if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF) - nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF; - - for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) { - struct bcm_convergence_types *psfCSType = NULL; - struct bcm_packet_class_rules *clsRule = NULL; - - psfCSType = 
&pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex]; - clsRule = &psfCSType->cCPacketClassificationRule; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, " CCPacketClassificationRuleSI====>"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, " u8ClassifierRulePriority: 0x%X ", - clsRule->u8ClassifierRulePriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, " u8IPTypeOfServiceLength: 0x%X ", - clsRule->u8IPTypeOfServiceLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ", - clsRule->u8IPTypeOfService[0], - clsRule->u8IPTypeOfService[1], - clsRule->u8IPTypeOfService[2]); - - for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8Protocol: 0x%X ", - clsRule->u8Protocol); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - "u8IPMaskedSourceAddressLength: 0x%X ", - clsRule->u8IPMaskedSourceAddressLength); - - for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - "u8IPMaskedSourceAddress[32]: 0x%X ", - clsRule->u8IPMaskedSourceAddress[uiLoopIndex]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - "u8IPDestinationAddressLength: 0x%02X ", - clsRule->u8IPDestinationAddressLength); - - for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8IPDestinationAddress[32]:0x%X ", - clsRule->u8IPDestinationAddress[uiLoopIndex]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8ProtocolSourcePortRangeLength: 0x%X ", - clsRule->u8ProtocolSourcePortRangeLength); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8ProtocolSourcePortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ", - clsRule->u8ProtocolSourcePortRange[0], - clsRule->u8ProtocolSourcePortRange[1], - clsRule->u8ProtocolSourcePortRange[2], - clsRule->u8ProtocolSourcePortRange[3]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8ProtocolDestPortRangeLength: 0x%X ", - clsRule->u8ProtocolDestPortRangeLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8ProtocolDestPortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ", - clsRule->u8ProtocolDestPortRange[0], - clsRule->u8ProtocolDestPortRange[1], - clsRule->u8ProtocolDestPortRange[2], - clsRule->u8ProtocolDestPortRange[3]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8EthernetDestMacAddressLength: 0x%X ", - clsRule->u8EthernetDestMacAddressLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8EthernetDestMacAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X", - clsRule->u8EthernetDestMacAddress[0], - clsRule->u8EthernetDestMacAddress[1], - clsRule->u8EthernetDestMacAddress[2], - clsRule->u8EthernetDestMacAddress[3], - clsRule->u8EthernetDestMacAddress[4], - clsRule->u8EthernetDestMacAddress[5]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8EthernetSourceMACAddressLength: 0x%X ", - clsRule->u8EthernetDestMacAddressLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - "u8EthernetSourceMACAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X", - clsRule->u8EthernetSourceMACAddress[0], - clsRule->u8EthernetSourceMACAddress[1], - 
clsRule->u8EthernetSourceMACAddress[2], - clsRule->u8EthernetSourceMACAddress[3], - clsRule->u8EthernetSourceMACAddress[4], - clsRule->u8EthernetSourceMACAddress[5]); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, " u8EthertypeLength: 0x%X ", - clsRule->u8EthertypeLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8Ethertype[3]: 0x%X ,0x%X ,0x%X ", - clsRule->u8Ethertype[0], - clsRule->u8Ethertype[1], - clsRule->u8Ethertype[2]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, " u16UserPriority: 0x%X ", - clsRule->u16UserPriority); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, " u16VLANID: 0x%X ", - clsRule->u16VLANID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, " u8AssociatedPHSI: 0x%X ", - clsRule->u8AssociatedPHSI); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u16PacketClassificationRuleIndex:0x%X ", - clsRule->u16PacketClassificationRuleIndex); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8VendorSpecificClassifierParamLength:0x%X ", - clsRule->u8VendorSpecificClassifierParamLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8VendorSpecificClassifierParam[1]:0x%X ", - clsRule->u8VendorSpecificClassifierParam[0]); -#ifdef VERSION_D5 - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, " u8IPv6FlowLableLength: 0x%X ", - clsRule->u8IPv6FlowLableLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, - DBG_LVL_ALL, - " u8IPv6FlowLable[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ", - clsRule->u8IPv6FlowLable[0], - clsRule->u8IPv6FlowLable[1], - clsRule->u8IPv6FlowLable[2], - clsRule->u8IPv6FlowLable[3], - clsRule->u8IPv6FlowLable[4], - clsRule->u8IPv6FlowLable[5]); -#endif - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, - " bValid: 0x%X", pstAddIndication->sfActiveSet.bValid); -} - -static inline ULONG RestoreSFParam(struct bcm_mini_adapter *Adapter, - ULONG ulAddrSFParamSet, PUCHAR pucDestBuffer) -{ - UINT nBytesToRead = sizeof(struct bcm_connect_mgr_params); - - if (ulAddrSFParamSet == 0 || NULL == pucDestBuffer) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Got Param address as 0!!"); - return 0; - } - ulAddrSFParamSet = ntohl(ulAddrSFParamSet); - - /* Read out the SF Param Set At the indicated Location */ - if (rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0) - return STATUS_FAILURE; - - return 1; -} - -static ULONG StoreSFParam(struct bcm_mini_adapter *Adapter, PUCHAR pucSrcBuffer, - ULONG ulAddrSFParamSet) -{ - UINT nBytesToWrite = sizeof(struct bcm_connect_mgr_params); - int ret = 0; - - if (ulAddrSFParamSet == 0 || NULL == pucSrcBuffer) - return 0; - - ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite); - if (ret < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "%s:%d WRM failed", __func__, __LINE__); - return ret; - } - return 1; -} - -ULONG StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter, - PVOID pvBuffer, UINT *puBufferLength) -{ - struct bcm_add_indication_alt *pstAddIndicationAlt = NULL; - struct bcm_add_indication *pstAddIndication = NULL; - struct bcm_del_request *pstDeletionRequest; - UINT uiSearchRuleIndex; - ULONG ulSFID; - - pstAddIndicationAlt = pvBuffer; - - /* - * In case of DSD Req By MS, we should immediately delete this SF so that - * we can stop the further 
classifying the pkt for this SF. - */ - if (pstAddIndicationAlt->u8Type == DSD_REQ) { - pstDeletionRequest = pvBuffer; - - ulSFID = ntohl(pstDeletionRequest->u32SFID); - uiSearchRuleIndex = SearchSfid(Adapter, ulSFID); - - if (uiSearchRuleIndex < NO_OF_QUEUES) { - deleteSFBySfid(Adapter, uiSearchRuleIndex); - Adapter->u32TotalDSD++; - } - return 1; - } - - if ((pstAddIndicationAlt->u8Type == DSD_RSP) || - (pstAddIndicationAlt->u8Type == DSD_ACK)) { - /* No Special handling send the message as it is */ - return 1; - } - /* For DSA_REQ, only up to "psfAuthorizedSet" parameter should be accessed by driver! */ - - pstAddIndication = kmalloc(sizeof(struct bcm_add_indication), - GFP_KERNEL); - if (pstAddIndication == NULL) - return 0; - - /* AUTHORIZED SET */ - pstAddIndication->psfAuthorizedSet = (struct bcm_connect_mgr_params *) - GetNextTargetBufferLocation(Adapter, - pstAddIndicationAlt->u16TID); - if (!pstAddIndication->psfAuthorizedSet) { - kfree(pstAddIndication); - return 0; - } - - if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet, - (ULONG)pstAddIndication->psfAuthorizedSet) != 1) { - kfree(pstAddIndication); - return 0; - } - - /* this can't possibly be right */ - pstAddIndication->psfAuthorizedSet = - (struct bcm_connect_mgr_params *) ntohl( - (ULONG)pstAddIndication->psfAuthorizedSet); - - if (pstAddIndicationAlt->u8Type == DSA_REQ) { - struct bcm_add_request AddRequest; - - AddRequest.u8Type = pstAddIndicationAlt->u8Type; - AddRequest.eConnectionDir = pstAddIndicationAlt->u8Direction; - AddRequest.u16TID = pstAddIndicationAlt->u16TID; - AddRequest.u16CID = pstAddIndicationAlt->u16CID; - AddRequest.u16VCID = pstAddIndicationAlt->u16VCID; - AddRequest.psfParameterSet = pstAddIndication->psfAuthorizedSet; - (*puBufferLength) = sizeof(struct bcm_add_request); - memcpy(pvBuffer, &AddRequest, sizeof(struct bcm_add_request)); - kfree(pstAddIndication); - return 1; - } - - /* Since it's not DSA_REQ, we can access all field in pstAddIndicationAlt */ - /* We need to extract the structure from the buffer and pack it differently */ - - pstAddIndication->u8Type = pstAddIndicationAlt->u8Type; - pstAddIndication->eConnectionDir = pstAddIndicationAlt->u8Direction; - pstAddIndication->u16TID = pstAddIndicationAlt->u16TID; - pstAddIndication->u16CID = pstAddIndicationAlt->u16CID; - pstAddIndication->u16VCID = pstAddIndicationAlt->u16VCID; - pstAddIndication->u8CC = pstAddIndicationAlt->u8CC; - - /* ADMITTED SET */ - pstAddIndication->psfAdmittedSet = (struct bcm_connect_mgr_params *) - GetNextTargetBufferLocation(Adapter, - pstAddIndicationAlt->u16TID); - if (!pstAddIndication->psfAdmittedSet) { - kfree(pstAddIndication); - return 0; - } - if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAdmittedSet, - (ULONG)pstAddIndication->psfAdmittedSet) != 1) { - kfree(pstAddIndication); - return 0; - } - - pstAddIndication->psfAdmittedSet = - (struct bcm_connect_mgr_params *) ntohl( - (ULONG) pstAddIndication->psfAdmittedSet); - - /* ACTIVE SET */ - pstAddIndication->psfActiveSet = (struct bcm_connect_mgr_params *) - GetNextTargetBufferLocation(Adapter, - pstAddIndicationAlt->u16TID); - if (!pstAddIndication->psfActiveSet) { - kfree(pstAddIndication); - return 0; - } - if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfActiveSet, - (ULONG)pstAddIndication->psfActiveSet) != 1) { - kfree(pstAddIndication); - return 0; - } - - pstAddIndication->psfActiveSet = - (struct bcm_connect_mgr_params *) ntohl( - (ULONG)pstAddIndication->psfActiveSet); - - (*puBufferLength) = 
sizeof(struct bcm_add_indication); - *(struct bcm_add_indication *)pvBuffer = *pstAddIndication; - kfree(pstAddIndication); - return 1; -} - -static inline struct bcm_add_indication_alt -*RestoreCmControlResponseMessage(register struct bcm_mini_adapter *Adapter, - register PVOID pvBuffer) -{ - ULONG ulStatus = 0; - struct bcm_add_indication *pstAddIndication = NULL; - struct bcm_add_indication_alt *pstAddIndicationDest = NULL; - - pstAddIndication = pvBuffer; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "=====>"); - if ((pstAddIndication->u8Type == DSD_REQ) || - (pstAddIndication->u8Type == DSD_RSP) || - (pstAddIndication->u8Type == DSD_ACK)) - return pvBuffer; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Inside RestoreCmControlResponseMessage "); - /* - * Need to Allocate memory to contain the SUPER Large structures - * Our driver can't create these structures on Stack :( - */ - pstAddIndicationDest = kmalloc(sizeof(struct bcm_add_indication_alt), - GFP_KERNEL); - - if (pstAddIndicationDest) { - memset(pstAddIndicationDest, 0, - sizeof(struct bcm_add_indication_alt)); - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, - DBG_LVL_ALL, - "Failed to allocate memory for SF Add Indication Structure "); - return NULL; - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "AddIndication-u8Type : 0x%X", - pstAddIndication->u8Type); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "AddIndication-u8Direction : 0x%X", - pstAddIndication->eConnectionDir); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "AddIndication-u8TID : 0x%X", - ntohs(pstAddIndication->u16TID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "AddIndication-u8CID : 0x%X", - ntohs(pstAddIndication->u16CID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "AddIndication-u16VCID : 0x%X", - ntohs(pstAddIndication->u16VCID)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "AddIndication-autorized set loc : %p", - pstAddIndication->psfAuthorizedSet); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "AddIndication-admitted set loc : %p", - pstAddIndication->psfAdmittedSet); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "AddIndication-Active set loc : %p", - pstAddIndication->psfActiveSet); - - pstAddIndicationDest->u8Type = pstAddIndication->u8Type; - pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir; - pstAddIndicationDest->u16TID = pstAddIndication->u16TID; - pstAddIndicationDest->u16CID = pstAddIndication->u16CID; - pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID; - pstAddIndicationDest->u8CC = pstAddIndication->u8CC; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Restoring Active Set "); - ulStatus = RestoreSFParam(Adapter, - (ULONG)pstAddIndication->psfActiveSet, - (PUCHAR)&pstAddIndicationDest->sfActiveSet); - if (ulStatus != 1) - goto failed_restore_sf_param; - - if (pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF) - pstAddIndicationDest->sfActiveSet.u8TotalClassifiers = - MAX_CLASSIFIERS_IN_SF; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Restoring Admitted Set "); - ulStatus = RestoreSFParam(Adapter, - (ULONG)pstAddIndication->psfAdmittedSet, - (PUCHAR)&pstAddIndicationDest->sfAdmittedSet); - if (ulStatus != 1) - goto failed_restore_sf_param; - - if 
(pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF) - pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers = - MAX_CLASSIFIERS_IN_SF; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Restoring Authorized Set "); - ulStatus = RestoreSFParam(Adapter, - (ULONG)pstAddIndication->psfAuthorizedSet, - (PUCHAR)&pstAddIndicationDest->sfAuthorizedSet); - if (ulStatus != 1) - goto failed_restore_sf_param; - - if (pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF) - pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers = - MAX_CLASSIFIERS_IN_SF; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Dumping the whole raw packet"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "============================================================"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - " pstAddIndicationDest->sfActiveSet size %zx %p", - sizeof(*pstAddIndicationDest), pstAddIndicationDest); - /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, - * DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, - * sizeof(*pstAddIndicationDest)); - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "============================================================"); - return pstAddIndicationDest; -failed_restore_sf_param: - kfree(pstAddIndicationDest); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "<====="); - return NULL; -} - -ULONG SetUpTargetDsxBuffers(struct bcm_mini_adapter *Adapter) -{ - ULONG ulTargetDsxBuffersBase = 0; - ULONG ulCntTargetBuffers; - ULONG i; - int Status; - - if (!Adapter) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Adapter was NULL!!!"); - return 0; - } - - if (Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer) - return 1; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Size of Each DSX Buffer(Also size of connection manager parameters): %zx ", - sizeof(struct bcm_connect_mgr_params)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Reading DSX buffer From Target location %x ", - DSX_MESSAGE_EXCHANGE_BUFFER); - - Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER, - (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT)); - if (Status < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "RDM failed!!"); - return 0; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Base Address Of DSX Target Buffer : 0x%lx", - ulTargetDsxBuffersBase); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Tgt Buffer is Now %lx :", ulTargetDsxBuffersBase); - ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE / - sizeof(struct bcm_connect_mgr_params); - - Adapter->ulTotalTargetBuffersAvailable = - ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ? 
- MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - " Total Target DSX Buffer setup %lx ", - Adapter->ulTotalTargetBuffersAvailable); - - for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) { - Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer = ulTargetDsxBuffersBase; - Adapter->astTargetDsxBuffer[i].valid = 1; - Adapter->astTargetDsxBuffer[i].tid = 0; - ulTargetDsxBuffersBase += sizeof(struct bcm_connect_mgr_params); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx", - i, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer); - } - Adapter->ulCurrentTargetBuffer = 0; - Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable; - return 1; -} - -static ULONG GetNextTargetBufferLocation(struct bcm_mini_adapter *Adapter, - B_UINT16 tid) -{ - ULONG dsx_buf; - ULONG idx, max_try; - - if ((Adapter->ulTotalTargetBuffersAvailable == 0) - || (Adapter->ulFreeTargetBufferCnt == 0)) { - ClearTargetDSXBuffer(Adapter, tid, false); - return 0; - } - - idx = Adapter->ulCurrentTargetBuffer; - max_try = Adapter->ulTotalTargetBuffersAvailable; - while ((max_try) && (Adapter->astTargetDsxBuffer[idx].valid != 1)) { - idx = (idx+1) % Adapter->ulTotalTargetBuffersAvailable; - max_try--; - } - - if (max_try == 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ", - Adapter->ulFreeTargetBufferCnt); - ClearTargetDSXBuffer(Adapter, tid, false); - return 0; - } - - dsx_buf = Adapter->astTargetDsxBuffer[idx].ulTargetDsxBuffer; - Adapter->astTargetDsxBuffer[idx].valid = 0; - Adapter->astTargetDsxBuffer[idx].tid = tid; - Adapter->ulFreeTargetBufferCnt--; - idx = (idx+1)%Adapter->ulTotalTargetBuffersAvailable; - Adapter->ulCurrentTargetBuffer = idx; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "GetNextTargetBufferLocation :Returning address %lx tid %d\n", - dsx_buf, tid); - - return dsx_buf; -} - -int AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter) -{ - /* - * Need to Allocate memory to contain the SUPER Large structures - * Our driver can't create these structures on Stack - */ - Adapter->caDsxReqResp = kmalloc(sizeof(struct bcm_add_indication_alt) - + LEADER_SIZE, GFP_KERNEL); - if (!Adapter->caDsxReqResp) - return -ENOMEM; - - return 0; -} - -int FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter) -{ - kfree(Adapter->caDsxReqResp); - return 0; -} - -/* - * @ingroup ctrl_pkt_functions - * This routinue would process the Control responses - * for the Connection Management. - * @return - Queue index for the free SFID else returns Invalid Index. 
- */ -bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* u16TID, false); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message"); - return false; - } - - DumpCmControlPacket(pstAddIndication); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>"); - pLeader = (struct bcm_leader *)Adapter->caDsxReqResp; - - pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ; - pLeader->Vcid = 0; - - ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, false); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID); - switch (pstAddIndication->u8Type) { - case DSA_REQ: - pLeader->PLength = sizeof(struct bcm_add_indication_alt); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength); - *((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) - = *pstAddIndication; - ((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID)); - CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp); - kfree(pstAddIndication); - break; - case DSA_RSP: - pLeader->PLength = sizeof(struct bcm_add_indication_alt); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d", - pLeader->PLength); - *((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) - = *pstAddIndication; - ((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK; - /* FALLTHROUGH */ - case DSA_ACK: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X", - ntohs(pstAddIndication->u16VCID)); - uiSearchRuleIndex = SearchFreeSfid(Adapter); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiSearchRuleIndex:0x%X ", - uiSearchRuleIndex); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ", - pstAddIndication->u8Direction); - if (uiSearchRuleIndex < NO_OF_QUEUES) { - Adapter->PackInfo[uiSearchRuleIndex].ucDirection = - pstAddIndication->u8Direction; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ", - pstAddIndication->sfActiveSet.bValid); - if (pstAddIndication->sfActiveSet.bValid == TRUE) - Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE; - - if (pstAddIndication->sfAuthorizedSet.bValid == TRUE) - Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE; - - if (pstAddIndication->sfAdmittedSet.bValid == TRUE) - Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE; - - if (pstAddIndication->sfActiveSet.bValid == false) { - Adapter->PackInfo[uiSearchRuleIndex].bActive = false; - Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = false; - if (pstAddIndication->sfAdmittedSet.bValid) - psfLocalSet = &pstAddIndication->sfAdmittedSet; - else if (pstAddIndication->sfAuthorizedSet.bValid) - psfLocalSet = &pstAddIndication->sfAuthorizedSet; - } else { - psfLocalSet = &pstAddIndication->sfActiveSet; - Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE; - } - - if (!psfLocalSet) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n"); - Adapter->PackInfo[uiSearchRuleIndex].bActive = false; - Adapter->PackInfo[uiSearchRuleIndex].bValid = false; - 
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0; - kfree(pstAddIndication); - } else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK"); - Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstAddIndication->u16VCID); - Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstAddIndication->u16CID); - - if (UPLINK_DIR == pstAddIndication->u8Direction) - atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT); - - CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSA_ACK, pstAddIndication); - /* don't free pstAddIndication */ - - /* Inside CopyToAdapter, Sorting of all the SFs take place. - * Hence any access to the newly added SF through uiSearchRuleIndex is invalid. - * SHOULD BE STRICTLY AVOIDED. - */ - /* *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; */ - memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4); - - if (pstAddIndication->sfActiveSet.bValid == TRUE) { - if (UPLINK_DIR == pstAddIndication->u8Direction) { - if (!Adapter->LinkUpStatus) { - netif_carrier_on(Adapter->dev); - netif_start_queue(Adapter->dev); - Adapter->LinkUpStatus = 1; - if (netif_msg_link(Adapter)) - pr_info(PFX "%s: link up\n", Adapter->dev->name); - atomic_set(&Adapter->TxPktAvail, 1); - wake_up(&Adapter->tx_packet_wait_queue); - Adapter->liTimeSinceLastNetEntry = get_seconds(); - } - } - } - } else { - Adapter->PackInfo[uiSearchRuleIndex].bActive = false; - Adapter->PackInfo[uiSearchRuleIndex].bValid = false; - Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0; - kfree(pstAddIndication); - } - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID"); - kfree(pstAddIndication); - return false; - } - break; - case DSC_REQ: - pLeader->PLength = sizeof(struct bcm_change_indication); - pstChangeIndication = (struct bcm_change_indication *)pstAddIndication; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength); - - *((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication; - ((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP; - - CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp); - kfree(pstAddIndication); - break; - case DSC_RSP: - pLeader->PLength = sizeof(struct bcm_change_indication); - pstChangeIndication = (struct bcm_change_indication *)pstAddIndication; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength); - *((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication; - ((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK; - /* FALLTHROUGH */ - case DSC_ACK: - pstChangeIndication = (struct bcm_change_indication *)pstAddIndication; - uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID)); - if (uiSearchRuleIndex > NO_OF_QUEUES-1) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received"); - - if (uiSearchRuleIndex < NO_OF_QUEUES) { - Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction; - if (pstChangeIndication->sfActiveSet.bValid == TRUE) - Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE; - - if (pstChangeIndication->sfAuthorizedSet.bValid == TRUE) - Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE; - - if 
(pstChangeIndication->sfAdmittedSet.bValid == TRUE) - Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE; - - if (pstChangeIndication->sfActiveSet.bValid == false) { - Adapter->PackInfo[uiSearchRuleIndex].bActive = false; - Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = false; - - if (pstChangeIndication->sfAdmittedSet.bValid) - psfLocalSet = &pstChangeIndication->sfAdmittedSet; - else if (pstChangeIndication->sfAuthorizedSet.bValid) - psfLocalSet = &pstChangeIndication->sfAuthorizedSet; - } else { - psfLocalSet = &pstChangeIndication->sfActiveSet; - Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE; - } - - if (!psfLocalSet) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n"); - Adapter->PackInfo[uiSearchRuleIndex].bActive = false; - Adapter->PackInfo[uiSearchRuleIndex].bValid = false; - Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0; - kfree(pstAddIndication); - } else if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) { - Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstChangeIndication->u16VCID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n", - pstChangeIndication->u8CC, psfLocalSet->bValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID)); - Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstChangeIndication->u16CID); - CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSC_ACK, pstAddIndication); - - *(PULONG)(((PUCHAR)pvBuffer)+1) = psfLocalSet->u32SFID; - } else if (pstChangeIndication->u8CC == 6) { - deleteSFBySfid(Adapter, uiSearchRuleIndex); - kfree(pstAddIndication); - } - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID"); - kfree(pstAddIndication); - return false; - } - break; - case DSD_REQ: - pLeader->PLength = sizeof(struct bcm_del_indication); - *((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((struct bcm_del_indication *)pstAddIndication); - - ulSFID = ntohl(((struct bcm_del_indication *)pstAddIndication)->u32SFID); - uiSearchRuleIndex = SearchSfid(Adapter, ulSFID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x", uiSearchRuleIndex); - - if (uiSearchRuleIndex < NO_OF_QUEUES) { - /* Delete All Classifiers Associated with this SFID */ - deleteSFBySfid(Adapter, uiSearchRuleIndex); - Adapter->u32TotalDSD++; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC"); - ((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP; - CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp); - /* FALLTHROUGH */ - case DSD_RSP: - /* Do nothing as SF has already got Deleted */ - break; - case DSD_ACK: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n"); - break; - default: - kfree(pstAddIndication); - return false; - } - return TRUE; -} - -int get_dsx_sf_data_to_application(struct bcm_mini_adapter *Adapter, - UINT uiSFId, void __user *user_buffer) -{ - int status = 0; - struct bcm_packet_info *psSfInfo = NULL; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "status =%d", status); - status = SearchSfid(Adapter, uiSFId); - if (status >= NO_OF_QUEUES) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "SFID %d not present in queue !!!", uiSFId); - return -EINVAL; - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - 
"status =%d", status); - psSfInfo = &Adapter->PackInfo[status]; - if (psSfInfo->pstSFIndication - && copy_to_user(user_buffer, psSfInfo->pstSFIndication, - sizeof(struct bcm_add_indication_alt))) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "copy to user failed SFID %d, present in queue !!!", - uiSFId); - status = -EFAULT; - return status; - } - return STATUS_SUCCESS; -} - -VOID OverrideServiceFlowParams(struct bcm_mini_adapter *Adapter, - PUINT puiBuffer) -{ - B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1)); - struct bcm_stim_sfhostnotify *pHostInfo = NULL; - UINT uiSearchRuleIndex = 0; - ULONG ulSFID = 0; - - puiBuffer += 2; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "u32NumofSFsinMsg: 0x%x\n", u32NumofSFsinMsg); - - while (u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES) { - u32NumofSFsinMsg--; - pHostInfo = (struct bcm_stim_sfhostnotify *)puiBuffer; - puiBuffer = (PUINT)(pHostInfo + 1); - - ulSFID = ntohl(pHostInfo->SFID); - uiSearchRuleIndex = SearchSfid(Adapter, ulSFID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "SFID: 0x%lx\n", ulSFID); - - if (uiSearchRuleIndex >= NO_OF_QUEUES - || uiSearchRuleIndex == HiPriority) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, - DBG_LVL_ALL, - "The SFID <%lx> doesn't exist in host entry or is Invalid\n", - ulSFID); - continue; - } - - if (pHostInfo->RetainSF == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, - DBG_LVL_ALL, "Going to Delete SF"); - deleteSFBySfid(Adapter, uiSearchRuleIndex); - } else { - struct bcm_packet_info *packinfo = - &Adapter->PackInfo[uiSearchRuleIndex]; - - packinfo->usVCID_Value = ntohs(pHostInfo->VCID); - packinfo->usCID = ntohs(pHostInfo->newCID); - packinfo->bActive = false; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, - DBG_LVL_ALL, - "pHostInfo->QoSParamSet: 0x%x\n", - pHostInfo->QoSParamSet); - - if (pHostInfo->QoSParamSet & 0x1) - packinfo->bAuthorizedSet = TRUE; - if (pHostInfo->QoSParamSet & 0x2) - packinfo->bAdmittedSet = TRUE; - if (pHostInfo->QoSParamSet & 0x4) { - packinfo->bActiveSet = TRUE; - packinfo->bActive = TRUE; - } - } - } -} - -static void restore_endianess_of_pstClassifierEntry( - struct bcm_classifier_rule *pstClassifierEntry, - enum bcm_ipaddr_context eIpAddrContext) -{ - int i; - union u_ip_address *stSrc = &pstClassifierEntry->stSrcIpAddress; - union u_ip_address *stDest = &pstClassifierEntry->stDestIpAddress; - - for (i = 0; i < MAX_IP_RANGE_LENGTH * 4; i++) { - if (eIpAddrContext == eSrcIpAddress) { - stSrc->ulIpv6Addr[i] = ntohl(stSrc->ulIpv6Addr[i]); - stSrc->ulIpv6Mask[i] = ntohl(stSrc->ulIpv6Mask[i]); - } else if (eIpAddrContext == eDestIpAddress) { - stDest->ulIpv6Addr[i] = ntohl(stDest->ulIpv6Addr[i]); - stDest->ulIpv6Mask[i] = ntohl(stDest->ulIpv6Mask[i]); - } - } -} - -static void apply_phs_rule_to_all_classifiers( - register struct bcm_mini_adapter *Adapter, /* u8Direction == UPLINK_DIR) { - for (uiClassifierIndex = 0; uiClassifierIndex < MAX_CLASSIFIERS; uiClassifierIndex++) { - curr_classifier = - &Adapter->astClassifierTable[uiClassifierIndex]; - if ((curr_classifier->bUsed) && - (curr_classifier->ulSFID == Adapter->PackInfo[uiSearchRuleIndex].ulSFID) && - (curr_classifier->u8AssociatedPHSI == cPhsRule->u8PHSI)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, - "Adding PHS Rule For Classifier: 0x%x cPhsRule.u8PHSI: 0x%x\n", - curr_classifier->uiClassifierRuleIndex, - cPhsRule->u8PHSI); - /* Update The PHS Rule for this classifier as Associated PHSI id 
defined */ - - /* Copy the PHS Rule */ - sPhsRule->u8PHSI = cPhsRule->u8PHSI; - sPhsRule->u8PHSFLength = cPhsRule->u8PHSFLength; - sPhsRule->u8PHSMLength = cPhsRule->u8PHSMLength; - sPhsRule->u8PHSS = cPhsRule->u8PHSS; - sPhsRule->u8PHSV = cPhsRule->u8PHSV; - memcpy(sPhsRule->u8PHSF, cPhsRule->u8PHSF, MAX_PHS_LENGTHS); - memcpy(sPhsRule->u8PHSM, cPhsRule->u8PHSM, MAX_PHS_LENGTHS); - sPhsRule->u8RefCnt = 0; - sPhsRule->bUnclassifiedPHSRule = false; - sPhsRule->PHSModifiedBytes = 0; - sPhsRule->PHSModifiedNumPackets = 0; - sPhsRule->PHSErrorNumPackets = 0; - - /* bPHSRuleAssociated = TRUE; */ - /* Store The PHS Rule for this classifier */ - - PhsUpdateClassifierRule( - &Adapter->stBCMPhsContext, - uVCID, - curr_classifier->uiClassifierRuleIndex, - sPhsRule, - curr_classifier->u8AssociatedPHSI); - - /* Update PHS Rule For the Classifier */ - if (sPhsRule->u8PHSI) { - curr_classifier->u32PHSRuleID = sPhsRule->u8PHSI; - memcpy(&curr_classifier->sPhsRule, sPhsRule, sizeof(struct bcm_phs_rule)); - } - } - } - } else { - /* Error PHS Rule specified in signaling could not be applied to any classifier */ - - /* Copy the PHS Rule */ - sPhsRule->u8PHSI = cPhsRule->u8PHSI; - sPhsRule->u8PHSFLength = cPhsRule->u8PHSFLength; - sPhsRule->u8PHSMLength = cPhsRule->u8PHSMLength; - sPhsRule->u8PHSS = cPhsRule->u8PHSS; - sPhsRule->u8PHSV = cPhsRule->u8PHSV; - memcpy(sPhsRule->u8PHSF, cPhsRule->u8PHSF, MAX_PHS_LENGTHS); - memcpy(sPhsRule->u8PHSM, cPhsRule->u8PHSM, MAX_PHS_LENGTHS); - sPhsRule->u8RefCnt = 0; - sPhsRule->bUnclassifiedPHSRule = TRUE; - sPhsRule->PHSModifiedBytes = 0; - sPhsRule->PHSModifiedNumPackets = 0; - sPhsRule->PHSErrorNumPackets = 0; - /* Store The PHS Rule for this classifier */ - - /* - * Passing the argument u8PHSI instead of clsid. Because for DL with no classifier rule, - * clsid will be zero hence we can't have multiple PHS rules for the same SF. - * To support multiple PHS rule, passing u8PHSI. - */ - PhsUpdateClassifierRule( - &Adapter->stBCMPhsContext, - uVCID, - sPhsRule->u8PHSI, - sPhsRule, - sPhsRule->u8PHSI); - } -} diff --git a/drivers/staging/bcm/CmHost.h b/drivers/staging/bcm/CmHost.h deleted file mode 100644 index 0887d3f49e2f..000000000000 --- a/drivers/staging/bcm/CmHost.h +++ /dev/null @@ -1,62 +0,0 @@ -/*************************************************************************** - * (c) Beceem Communications Inc. - * All Rights Reserved - * - * file : CmHost.h - * author: Rajeev Tirumala - * date : September 8 , 2006 - * brief : Definitions for Connection Management Requests structure - * which we will use to setup our connection structures.Its high - * time we had a header file for CmHost.cpp to isolate the way - * f/w sends DSx messages and the way we interpret them in code. 
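In apply_phs_rule_to_all_classifiers() above, the uplink branch installs the PHS rule once for every classifier that belongs to the same SFID and carries the matching PHSI, while the non-uplink branch installs it keyed by the PHSI itself, since a downlink flow with no classifier would always present classifier id 0 and could then hold only one PHS rule per SF. A stand-alone sketch of that key choice, with a stub in place of PhsUpdateClassifierRule():

/* Sketch of the "classifier index vs. PHSI as rule key" decision above.
 * phs_update() is a stub for PhsUpdateClassifierRule(); the table and
 * values are illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct classifier {
    bool     used;
    uint32_t sfid;
    uint8_t  associated_phsi;
    uint16_t rule_index;
};

static void phs_update(uint16_t vcid, uint16_t key, uint8_t phsi)
{
    printf("install PHS rule: vcid=%u key=%u phsi=%u\n", vcid, key, phsi);
}

static void apply_phs_rule(const struct classifier *tbl, int n,
                           uint32_t sfid, uint16_t vcid, uint8_t phsi,
                           bool uplink)
{
    if (uplink) {
        /* uplink: one install per matching classifier on this SF */
        for (int i = 0; i < n; i++)
            if (tbl[i].used && tbl[i].sfid == sfid &&
                tbl[i].associated_phsi == phsi)
                phs_update(vcid, tbl[i].rule_index, phsi);
    } else {
        /* downlink with no classifier: the PHSI doubles as the key so
         * one SF can still carry more than one PHS rule */
        phs_update(vcid, phsi, phsi);
    }
}

int main(void)
{
    const struct classifier tbl[] = {
        { true, 7, 3, 100 },
        { true, 7, 3, 101 },
    };

    apply_phs_rule(tbl, 2, 7, 42, 3, true);    /* two per-classifier installs */
    apply_phs_rule(tbl, 2, 8, 43, 5, false);   /* PHSI-keyed fallback         */
    return 0;
}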
- * Revision History - * - * Date Author Version Description - * 08-Sep-06 Rajeev 0.1 Created - ***************************************************************************/ -#ifndef _CM_HOST_H -#define _CM_HOST_H - -#pragma once -#pragma pack(push, 4) - -#define DSX_MESSAGE_EXCHANGE_BUFFER 0xBF60AC84 /* This contains the pointer */ -#define DSX_MESSAGE_EXCHANGE_BUFFER_SIZE 72000 /* 24 K Bytes */ - -struct bcm_add_indication_alt { - u8 u8Type; - u8 u8Direction; - u16 u16TID; - u16 u16CID; - u16 u16VCID; - struct bcm_connect_mgr_params sfAuthorizedSet; - struct bcm_connect_mgr_params sfAdmittedSet; - struct bcm_connect_mgr_params sfActiveSet; - u8 u8CC; /* < Confirmation Code */ - u8 u8Padd; - u16 u16Padd; -}; - -struct bcm_change_indication { - u8 u8Type; - u8 u8Direction; - u16 u16TID; - u16 u16CID; - u16 u16VCID; - struct bcm_connect_mgr_params sfAuthorizedSet; - struct bcm_connect_mgr_params sfAdmittedSet; - struct bcm_connect_mgr_params sfActiveSet; - u8 u8CC; /* < Confirmation Code */ - u8 u8Padd; - u16 u16Padd; -}; - -unsigned long StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer, unsigned int *puBufferLength); -int AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter); -int FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter); -unsigned long SetUpTargetDsxBuffers(struct bcm_mini_adapter *Adapter); -bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer); - -#pragma pack(pop) - -#endif diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c deleted file mode 100644 index 4226c931cd45..000000000000 --- a/drivers/staging/bcm/DDRInit.c +++ /dev/null @@ -1,1355 +0,0 @@ -#include "headers.h" - - - -#define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00 -#define MIPS_CLOCK_REG 0x0f000820 - -/* DDR INIT-133Mhz */ -#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3_DDRSetting133MHz[] = { - /* DPLL Clock Setting */ - {0x0F000800, 0x00007212}, - {0x0f000820, 0x07F13FFF}, - {0x0f000810, 0x00000F95}, - {0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0f000840, 0x0FFF1B00}, - {0x0f000870, 0x00000002}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - {0x0F00a04C, 0x0000000C}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01010100}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020001}, - {0x0F007020, 0x04030107}, - {0x0F007024, 0x02000007}, - {0x0F007028, 0x02020202}, - {0x0F00702c, 0x0206060a}, - {0x0F007030, 0x05000000}, - {0x0F007034, 0x00000003}, - {0x0F007038, 0x110a0200}, - {0x0F00703C, 0x02101010}, - {0x0F007040, 0x45751200}, - {0x0F007044, 0x110a0d00}, - {0x0F007048, 0x081b0306}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0000001c}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x0010246c}, - {0x0F007064, 0x00000010}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00007000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - /* Enable BW improvement within memory controller */ - {0x0F007094, 0x00000104}, - /* Enable 2 ports within X-bar */ - {0x0F00A000, 0x00000016}, - /* Enable start bit within memory controller */ - 
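The two structures declared in CmHost.h here, bcm_add_indication_alt and bcm_change_indication, carry exactly the same member sequence under #pragma pack(push, 4), which is what lets the DSx handler reinterpret one received buffer as either type according to u8Type. A stand-alone sketch that checks such a shared layout with offsetof(); bcm_connect_mgr_params is stubbed, since its real definition lives in another header.

/* Sketch: two indication types sharing one layout, the property the DSx
 * handler relies on when it casts the buffer.  connect_mgr_params is a
 * stub; the real bcm_connect_mgr_params is declared elsewhere. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 4)
struct connect_mgr_params { uint8_t opaque[16]; };   /* stub */

#define INDICATION_MEMBERS                     \
    uint8_t  u8Type;                           \
    uint8_t  u8Direction;                      \
    uint16_t u16TID;                           \
    uint16_t u16CID;                           \
    uint16_t u16VCID;                          \
    struct connect_mgr_params sfAuthorizedSet; \
    struct connect_mgr_params sfAdmittedSet;   \
    struct connect_mgr_params sfActiveSet;     \
    uint8_t  u8CC;                             \
    uint8_t  u8Padd;                           \
    uint16_t u16Padd;

struct add_indication_alt { INDICATION_MEMBERS };
struct change_indication  { INDICATION_MEMBERS };
#pragma pack(pop)

int main(void)
{
    printf("u8CC offset %zu/%zu, size %zu/%zu\n",
           offsetof(struct add_indication_alt, u8CC),
           offsetof(struct change_indication, u8CC),
           sizeof(struct add_indication_alt),
           sizeof(struct change_indication));
    return 0;
}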
{0x0F007018, 0x01010000} -}; -/* 80Mhz */ -#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3_DDRSetting80MHz[] = { - /* DPLL Clock Setting */ - {0x0f000810, 0x00000F95}, - {0x0f000820, 0x07f1ffff}, - {0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - {0x0F00a000, 0x00000016}, - {0x0F00a04C, 0x0000000C}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01000000}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020000}, - {0x0F007020, 0x04020107}, - {0x0F007024, 0x00000007}, - {0x0F007028, 0x02020201}, - {0x0F00702c, 0x0204040a}, - {0x0F007030, 0x04000000}, - {0x0F007034, 0x00000002}, - {0x0F007038, 0x1F060200}, - {0x0F00703C, 0x1C22221F}, - {0x0F007040, 0x8A006600}, - {0x0F007044, 0x221a0800}, - {0x0F007048, 0x02690204}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0000001c}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x000A15D6}, - {0x0F007064, 0x0000000A}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00004000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - {0x0F007094, 0x00000104}, - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000} -}; -/* 100Mhz */ -#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3_DDRSetting100MHz[] = { - /* DPLL Clock Setting */ - {0x0F000800, 0x00007008}, - {0x0f000810, 0x00000F95}, - {0x0f000820, 0x07F13E3F}, - {0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0f000840, 0x0FFF1B00}, - {0x0f000870, 0x00000002}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - {0x0F00a04C, 0x0000000C}, - /* Enable 2 ports within X-bar */ - {0x0F00A000, 0x00000016}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01010100}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020001}, - {0x0F007020, 0x04020107}, - {0x0F007024, 0x00000007}, - {0x0F007028, 0x01020201}, - {0x0F00702c, 0x0204040A}, - {0x0F007030, 0x06000000}, - {0x0F007034, 0x00000004}, - {0x0F007038, 0x20080200}, - {0x0F00703C, 0x02030320}, - {0x0F007040, 0x6E7F1200}, - {0x0F007044, 0x01190A00}, - {0x0F007048, 0x06120305}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0000001C}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x00082ED6}, - {0x0F007064, 0x0000000A}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00005000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - /* Enable BW improvement within memory controller */ - {0x0F007094, 0x00000104}, - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000} -}; - -/* Net T3B DDR Settings - * DDR INIT-133Mhz - */ -static struct bcm_ddr_setting asDPLL_266MHZ[] = { - {0x0F000800, 0x00007212}, - {0x0f000820, 0x07F13FFF}, - {0x0f000810, 0x00000F95}, - 
{0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0f000840, 0x0FFF1B00}, - {0x0f000870, 0x00000002} -}; - -#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = { - /* DPLL Clock Setting */ - {0x0f000810, 0x00000F95}, - {0x0f000810, 0x00000F95}, - {0x0f000810, 0x00000F95}, - {0x0f000820, 0x07F13652}, - {0x0f000840, 0x0FFF0800}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0f000880, 0x000003DD}, - {0x0f000860, 0x00000000}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - /* Enable 2 ports within X-bar */ - {0x0F00A000, 0x00000016}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01010100}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020001}, - {0x0F007020, 0x04030107}, - {0x0F007024, 0x02000007}, - {0x0F007028, 0x02020202}, - {0x0F00702c, 0x0206060a}, - {0x0F007030, 0x05000000}, - {0x0F007034, 0x00000003}, - {0x0F007038, 0x130a0200}, - {0x0F00703C, 0x02101012}, - {0x0F007040, 0x457D1200}, - {0x0F007044, 0x11130d00}, - {0x0F007048, 0x040D0306}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0000001c}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x0010246c}, - {0x0F007064, 0x00000012}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00007000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - /* Enable BW improvement within memory controller */ - {0x0F007094, 0x00000104}, - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000}, - }; - -#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = { - /* DPLL Clock Setting */ - {0x0f000810, 0x00000F95}, - {0x0f000820, 0x07F13FFF}, - {0x0f000840, 0x0FFF1F00}, - {0x0f000880, 0x000003DD}, - {0x0f000860, 0x00000000}, - - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - {0x0F00a000, 0x00000016}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01000000}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020000}, - {0x0F007020, 0x04020107}, - {0x0F007024, 0x00000007}, - {0x0F007028, 0x02020201}, - {0x0F00702c, 0x0204040a}, - {0x0F007030, 0x04000000}, - {0x0F007034, 0x02000002}, - {0x0F007038, 0x1F060202}, - {0x0F00703C, 0x1C22221F}, - {0x0F007040, 0x8A006600}, - {0x0F007044, 0x221a0800}, - {0x0F007048, 0x02690204}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0100001c}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x000A15D6}, - {0x0F007064, 0x0000000A}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00004000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - {0x0F007094, 0x00000104}, - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000} -}; - -/* 100Mhz */ -#define 
T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = { - /* DPLL Clock Setting */ - {0x0f000810, 0x00000F95}, - {0x0f000820, 0x07F1369B}, - {0x0f000840, 0x0FFF0800}, - {0x0f000880, 0x000003DD}, - {0x0f000860, 0x00000000}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - /* Enable 2 ports within X-bar */ - {0x0F00A000, 0x00000016}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01010100}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020000}, - {0x0F007020, 0x04020107}, - {0x0F007024, 0x00000007}, - {0x0F007028, 0x01020201}, - {0x0F00702c, 0x0204040A}, - {0x0F007030, 0x06000000}, - {0x0F007034, 0x02000004}, - {0x0F007038, 0x20080200}, - {0x0F00703C, 0x02030320}, - {0x0F007040, 0x6E7F1200}, - {0x0F007044, 0x01190A00}, - {0x0F007048, 0x06120305}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0100001C}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x00082ED6}, - {0x0F007064, 0x0000000A}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00005000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - /* Enable BW improvement within memory controller */ - {0x0F007094, 0x00000104}, - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000} -}; - - -#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[] = { - /* DPLL Clock Setting */ - {0x0f000820, 0x03F1365B}, - {0x0f000810, 0x00002F95}, - {0x0f000880, 0x000003DD}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0f000840, 0x0FFF0000}, - {0x0f000860, 0x00000000}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - {0x0F00A000, 0x00000016}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01010100}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020001}, - {0x0F007020, 0x04030107}, - {0x0F007024, 0x02000007}, - {0x0F007028, 0x02020200}, - {0x0F00702c, 0x0206060a}, - {0x0F007030, 0x05000000}, - {0x0F007034, 0x00000003}, - {0x0F007038, 0x200a0200}, - {0x0F00703C, 0x02101020}, - {0x0F007040, 0x45711200}, - {0x0F007044, 0x110D0D00}, - {0x0F007048, 0x04080306}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0100001c}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x0010245F}, - {0x0F007064, 0x00000010}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00007000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - {0x0F007088, 0x01000001}, - {0x0F00708c, 0x00000101}, - {0x0F007090, 0x00000000}, - /* Enable BW improvement within memory controller */ - {0x0F007094, 0x00040000}, - {0x0F007098, 0x00000000}, - {0x0F0070c8, 0x00000104}, - /* Enable 2 ports within X-bar */ - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000} -}; - -#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 /* index for 0x0F007000 */ -static struct 
bcm_ddr_setting asT3LP_DDRSetting100MHz[] = { - /* DPLL Clock Setting */ - {0x0f000810, 0x00002F95}, - {0x0f000820, 0x03F1369B}, - {0x0f000840, 0x0fff0000}, - {0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0f000840, 0x0FFF0000}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01010100}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020000}, - {0x0F007020, 0x04020107}, - {0x0F007024, 0x00000007}, - {0x0F007028, 0x01020200}, - {0x0F00702c, 0x0204040a}, - {0x0F007030, 0x06000000}, - {0x0F007034, 0x00000004}, - {0x0F007038, 0x1F080200}, - {0x0F00703C, 0x0203031F}, - {0x0F007040, 0x6e001200}, - {0x0F007044, 0x011a0a00}, - {0x0F007048, 0x03000305}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0100001c}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x00082ED6}, - {0x0F007064, 0x0000000A}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00005000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - {0x0F007088, 0x01000001}, - {0x0F00708c, 0x00000101}, - {0x0F007090, 0x00000000}, - {0x0F007094, 0x00010000}, - {0x0F007098, 0x00000000}, - {0x0F0070C8, 0x00000104}, - /* Enable 2 ports within X-bar */ - {0x0F00A000, 0x00000016}, - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000} -}; - -#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[] = { - /* DPLL Clock Setting */ - {0x0f000820, 0x07F13FFF}, - {0x0f000810, 0x00002F95}, - {0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - {0x0f000840, 0x0FFF1F00}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0F00a084, 0x1Cffffff}, - {0x0F00a080, 0x1C000000}, - {0x0F00A000, 0x00000016}, - {0x0f007000, 0x00010001}, - {0x0f007004, 0x01000000}, - {0x0f007008, 0x01000001}, - {0x0f00700c, 0x00000000}, - {0x0f007010, 0x01000000}, - {0x0f007014, 0x01000100}, - {0x0f007018, 0x01000000}, - {0x0f00701c, 0x01020000}, - {0x0f007020, 0x04020107}, - {0x0f007024, 0x00000007}, - {0x0f007028, 0x02020200}, - {0x0f00702c, 0x0204040a}, - {0x0f007030, 0x04000000}, - {0x0f007034, 0x00000002}, - {0x0f007038, 0x1d060200}, - {0x0f00703c, 0x1c22221d}, - {0x0f007040, 0x8A116600}, - {0x0f007044, 0x222d0800}, - {0x0f007048, 0x02690204}, - {0x0f00704c, 0x00000000}, - {0x0f007050, 0x0100001c}, - {0x0f007054, 0x00000000}, - {0x0f007058, 0x00000000}, - {0x0f00705c, 0x00000000}, - {0x0f007060, 0x000A15D6}, - {0x0f007064, 0x0000000A}, - {0x0f007068, 0x00000000}, - {0x0f00706c, 0x00000001}, - {0x0f007070, 0x00004000}, - {0x0f007074, 0x00000000}, - {0x0f007078, 0x00000000}, - {0x0f00707c, 0x00000000}, - {0x0f007080, 0x00000000}, - {0x0f007084, 0x00000000}, - {0x0f007088, 0x01000001}, - {0x0f00708c, 0x00000101}, - {0x0f007090, 0x00000000}, - {0x0f007094, 0x00010000}, - {0x0f007098, 0x00000000}, - {0x0F0070C8, 0x00000104}, - {0x0F007018, 0x01010000} -}; - - - - -/* T3 LP-B (UMA-B) */ - -#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[] = { - /* DPLL Clock Setting */ - {0x0f000820, 0x03F137DB}, - {0x0f000810, 
0x01842795}, - {0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - {0x0f000840, 0x0FFF0400}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0f003050, 0x00000021}, /* this is flash/eeprom clock divisor which - * set the flash clock to 20 MHz */ - {0x0F00a084, 0x1Cffffff}, /* Now dump from her in internal memory */ - {0x0F00a080, 0x1C000000}, - {0x0F00A000, 0x00000016}, - {0x0f007000, 0x00010001}, - {0x0f007004, 0x01000001}, - {0x0f007008, 0x01000101}, - {0x0f00700c, 0x00000000}, - {0x0f007010, 0x01000100}, - {0x0f007014, 0x01000100}, - {0x0f007018, 0x01000000}, - {0x0f00701c, 0x01020000}, - {0x0f007020, 0x04030107}, - {0x0f007024, 0x02000007}, - {0x0f007028, 0x02020200}, - {0x0f00702c, 0x0206060a}, - {0x0f007030, 0x050d0d00}, - {0x0f007034, 0x00000003}, - {0x0f007038, 0x170a0200}, - {0x0f00703c, 0x02101012}, - {0x0f007040, 0x45161200}, - {0x0f007044, 0x11250c00}, - {0x0f007048, 0x04da0307}, - {0x0f00704c, 0x00000000}, - {0x0f007050, 0x0000001c}, - {0x0f007054, 0x00000000}, - {0x0f007058, 0x00000000}, - {0x0f00705c, 0x00000000}, - {0x0f007060, 0x00142bb6}, - {0x0f007064, 0x20430014}, - {0x0f007068, 0x00000000}, - {0x0f00706c, 0x00000001}, - {0x0f007070, 0x00009000}, - {0x0f007074, 0x00000000}, - {0x0f007078, 0x00000000}, - {0x0f00707c, 0x00000000}, - {0x0f007080, 0x00000000}, - {0x0f007084, 0x00000000}, - {0x0f007088, 0x01000001}, - {0x0f00708c, 0x00000101}, - {0x0f007090, 0x00000000}, - {0x0f007094, 0x00040000}, - {0x0f007098, 0x00000000}, - {0x0F0070C8, 0x00000104}, - {0x0F007018, 0x01010000} -}; - - -#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[] = { - /* DPLL Clock Setting */ - {0x0f000820, 0x03F1365B}, - {0x0f000810, 0x00002F95}, - {0x0f000880, 0x000003DD}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0f000840, 0x0FFF0000}, - {0x0f000860, 0x00000000}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which - * set the flash clock to 20 MHz */ - {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */ - {0x0F00a080, 0x1C000000}, - {0x0F00A000, 0x00000016}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01010100}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020001}, - {0x0F007020, 0x04030107}, - {0x0F007024, 0x02000007}, - {0x0F007028, 0x02020200}, - {0x0F00702c, 0x0206060a}, - {0x0F007030, 0x05000000}, - {0x0F007034, 0x00000003}, - {0x0F007038, 0x190a0200}, - {0x0F00703C, 0x02101017}, - {0x0F007040, 0x45171200}, - {0x0F007044, 0x11290D00}, - {0x0F007048, 0x04080306}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0100001c}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x0010245F}, - {0x0F007064, 0x00000010}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00007000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - {0x0F007088, 0x01000001}, - {0x0F00708c, 0x00000101}, - {0x0F007090, 0x00000000}, - /* Enable BW improvement within memory controller */ - {0x0F007094, 0x00040000}, - {0x0F007098, 0x00000000}, - {0x0F0070c8, 0x00000104}, - /* Enable 2 ports within X-bar */ - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000} -}; - -#define 
T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[] = { - /* DPLL Clock Setting */ - {0x0f000810, 0x00002F95}, - {0x0f000820, 0x03F1369B}, - {0x0f000840, 0x0fff0000}, - {0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - /* Changed source for X-bar and MIPS clock to APLL */ - {0x0f000840, 0x0FFF0000}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which - * set the flash clock to 20 MHz */ - {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */ - {0x0F00a080, 0x1C000000}, - /* Memcontroller Default values */ - {0x0F007000, 0x00010001}, - {0x0F007004, 0x01010100}, - {0x0F007008, 0x01000001}, - {0x0F00700c, 0x00000000}, - {0x0F007010, 0x01000000}, - {0x0F007014, 0x01000100}, - {0x0F007018, 0x01000000}, - {0x0F00701c, 0x01020000}, - {0x0F007020, 0x04020107}, - {0x0F007024, 0x00000007}, - {0x0F007028, 0x01020200}, - {0x0F00702c, 0x0204040a}, - {0x0F007030, 0x06000000}, - {0x0F007034, 0x00000004}, - {0x0F007038, 0x1F080200}, - {0x0F00703C, 0x0203031F}, - {0x0F007040, 0x6e001200}, - {0x0F007044, 0x011a0a00}, - {0x0F007048, 0x03000305}, - {0x0F00704c, 0x00000000}, - {0x0F007050, 0x0100001c}, - {0x0F007054, 0x00000000}, - {0x0F007058, 0x00000000}, - {0x0F00705c, 0x00000000}, - {0x0F007060, 0x00082ED6}, - {0x0F007064, 0x0000000A}, - {0x0F007068, 0x00000000}, - {0x0F00706c, 0x00000001}, - {0x0F007070, 0x00005000}, - {0x0F007074, 0x00000000}, - {0x0F007078, 0x00000000}, - {0x0F00707C, 0x00000000}, - {0x0F007080, 0x00000000}, - {0x0F007084, 0x00000000}, - {0x0F007088, 0x01000001}, - {0x0F00708c, 0x00000101}, - {0x0F007090, 0x00000000}, - {0x0F007094, 0x00010000}, - {0x0F007098, 0x00000000}, - {0x0F0070C8, 0x00000104}, - /* Enable 2 ports within X-bar */ - {0x0F00A000, 0x00000016}, - /* Enable start bit within memory controller */ - {0x0F007018, 0x01010000} -}; - -#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 /* index for 0x0F007000 */ -static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[] = { - /* DPLL Clock Setting */ - {0x0f000820, 0x07F13FFF}, - {0x0f000810, 0x00002F95}, - {0x0f000860, 0x00000000}, - {0x0f000880, 0x000003DD}, - {0x0f000840, 0x0FFF1F00}, - {0x0F00a044, 0x1fffffff}, - {0x0F00a040, 0x1f000000}, - {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor - * which set the flash clock to 20 MHz */ - {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */ - {0x0F00a080, 0x1C000000}, - {0x0F00A000, 0x00000016}, - {0x0f007000, 0x00010001}, - {0x0f007004, 0x01000000}, - {0x0f007008, 0x01000001}, - {0x0f00700c, 0x00000000}, - {0x0f007010, 0x01000000}, - {0x0f007014, 0x01000100}, - {0x0f007018, 0x01000000}, - {0x0f00701c, 0x01020000}, - {0x0f007020, 0x04020107}, - {0x0f007024, 0x00000007}, - {0x0f007028, 0x02020200}, - {0x0f00702c, 0x0204040a}, - {0x0f007030, 0x04000000}, - {0x0f007034, 0x00000002}, - {0x0f007038, 0x1d060200}, - {0x0f00703c, 0x1c22221d}, - {0x0f007040, 0x8A116600}, - {0x0f007044, 0x222d0800}, - {0x0f007048, 0x02690204}, - {0x0f00704c, 0x00000000}, - {0x0f007050, 0x0100001c}, - {0x0f007054, 0x00000000}, - {0x0f007058, 0x00000000}, - {0x0f00705c, 0x00000000}, - {0x0f007060, 0x000A15D6}, - {0x0f007064, 0x0000000A}, - {0x0f007068, 0x00000000}, - {0x0f00706c, 0x00000001}, - {0x0f007070, 0x00004000}, - {0x0f007074, 0x00000000}, - {0x0f007078, 0x00000000}, - {0x0f00707c, 0x00000000}, - {0x0f007080, 0x00000000}, - {0x0f007084, 0x00000000}, - {0x0f007088, 0x01000001}, - {0x0f00708c, 0x00000101}, - {0x0f007090, 0x00000000}, 
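Each as*_DDRSetting* array above is simply a list of {register address, value} pairs, and its companion *_SKIP_CLOCK_PROGRAM_DUMP_* macro records the index of the first memory-controller entry (the 0x0F007000 register) so that the clock/PLL entries at the head of the table can be skipped when the table is later dumped to device memory. A minimal stand-alone model of that layout; the example entries are abbreviated, not a real table:

/* Model of the DDRInit.c table format: {address, value} pairs plus a skip
 * index marking where the memory-controller block begins.  The table here
 * is abbreviated and only for illustration. */
#include <stddef.h>
#include <stdio.h>

struct ddr_setting {
    unsigned long reg_address;
    unsigned long reg_value;
};

#define EXAMPLE_SKIP_CLOCK_PROGRAM_DUMP 2   /* index of the 0x0F007000 entry */

static const struct ddr_setting example_table[] = {
    { 0x0F000810, 0x00000F95 },   /* DPLL/clock setup (skipped on dump)   */
    { 0x0F000820, 0x07F13FFF },   /* DPLL/clock setup (skipped on dump)   */
    { 0x0F007000, 0x00010001 },   /* memory-controller block starts here  */
    { 0x0F007018, 0x01010000 },   /* "start" bit entry comes last         */
};

int main(void)
{
    size_t count = sizeof(example_table) / sizeof(example_table[0]);

    /* ddr_init() walks the whole table; download_ddr_settings() advances
     * past the skip index and dumps only the memory-controller entries. */
    for (size_t i = EXAMPLE_SKIP_CLOCK_PROGRAM_DUMP; i < count; i++)
        printf("dump 0x%08lx = 0x%08lx\n",
               example_table[i].reg_address, example_table[i].reg_value);
    return 0;
}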
- {0x0f007094, 0x00010000}, - {0x0f007098, 0x00000000}, - {0x0F0070C8, 0x00000104}, - {0x0F007018, 0x01010000} -}; - - -int ddr_init(struct bcm_mini_adapter *Adapter) -{ - struct bcm_ddr_setting *psDDRSetting = NULL; - ULONG RegCount = 0; - UINT value = 0; - UINT uiResetValue = 0; - UINT uiClockSetting = 0; - int retval = STATUS_SUCCESS; - - switch (Adapter->chip_id) { - case 0xbece3200: - switch (Adapter->DDRSetting) { - case DDR_80_MHZ: - psDDRSetting = asT3LP_DDRSetting80MHz; - RegCount = (sizeof(asT3LP_DDRSetting80MHz) / - sizeof(struct bcm_ddr_setting)); - break; - case DDR_100_MHZ: - psDDRSetting = asT3LP_DDRSetting100MHz; - RegCount = (sizeof(asT3LP_DDRSetting100MHz) / - sizeof(struct bcm_ddr_setting)); - break; - case DDR_133_MHZ: - psDDRSetting = asT3LP_DDRSetting133MHz; - RegCount = (sizeof(asT3LP_DDRSetting133MHz) / - sizeof(struct bcm_ddr_setting)); - if (Adapter->bMipsConfig == MIPS_200_MHZ) - uiClockSetting = 0x03F13652; - else - uiClockSetting = 0x03F1365B; - break; - default: - return -EINVAL; - } - - break; - case T3LPB: - case BCS220_2: - case BCS220_2BC: - case BCS250_BC: - case BCS220_3: - /* Set bit 2 and bit 6 to 1 for BBIC 2mA drive - * (please check current value and additionally set these bits) - */ - if ((Adapter->chip_id != BCS220_2) && - (Adapter->chip_id != BCS220_2BC) && - (Adapter->chip_id != BCS220_3)) { - retval = rdmalt(Adapter, (UINT)0x0f000830, &uiResetValue, - sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, __LINE__); - return retval; - } - uiResetValue |= 0x44; - retval = wrmalt(Adapter, (UINT)0x0f000830, &uiResetValue, - sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, __LINE__); - return retval; - } - } - switch (Adapter->DDRSetting) { - - - - case DDR_80_MHZ: - psDDRSetting = asT3LPB_DDRSetting80MHz; - RegCount = (sizeof(asT3B_DDRSetting80MHz) / - sizeof(struct bcm_ddr_setting)); - break; - case DDR_100_MHZ: - psDDRSetting = asT3LPB_DDRSetting100MHz; - RegCount = (sizeof(asT3B_DDRSetting100MHz) / - sizeof(struct bcm_ddr_setting)); - break; - case DDR_133_MHZ: - psDDRSetting = asT3LPB_DDRSetting133MHz; - RegCount = (sizeof(asT3B_DDRSetting133MHz) / - sizeof(struct bcm_ddr_setting)); - - if (Adapter->bMipsConfig == MIPS_200_MHZ) - uiClockSetting = 0x03F13652; - else - uiClockSetting = 0x03F1365B; - break; - - case DDR_160_MHZ: - psDDRSetting = asT3LPB_DDRSetting160MHz; - RegCount = sizeof(asT3LPB_DDRSetting160MHz) / - sizeof(struct bcm_ddr_setting); - - if (Adapter->bMipsConfig == MIPS_200_MHZ) - uiClockSetting = 0x03F137D2; - else - uiClockSetting = 0x03F137DB; - } - break; - - case 0xbece0110: - case 0xbece0120: - case 0xbece0121: - case 0xbece0130: - case 0xbece0300: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, - "DDR Setting: %x\n", Adapter->DDRSetting); - switch (Adapter->DDRSetting) { - case DDR_80_MHZ: - psDDRSetting = asT3_DDRSetting80MHz; - RegCount = (sizeof(asT3_DDRSetting80MHz) / - sizeof(struct bcm_ddr_setting)); - break; - case DDR_100_MHZ: - psDDRSetting = asT3_DDRSetting100MHz; - RegCount = (sizeof(asT3_DDRSetting100MHz) / - sizeof(struct bcm_ddr_setting)); - break; - case DDR_133_MHZ: - psDDRSetting = asT3_DDRSetting133MHz; - RegCount = (sizeof(asT3_DDRSetting133MHz) / - sizeof(struct bcm_ddr_setting)); - break; - default: - return -EINVAL; - } - case 0xbece0310: - { - switch (Adapter->DDRSetting) { - case DDR_80_MHZ: - psDDRSetting = 
asT3B_DDRSetting80MHz; - RegCount = (sizeof(asT3B_DDRSetting80MHz) / - sizeof(struct bcm_ddr_setting)); - break; - case DDR_100_MHZ: - psDDRSetting = asT3B_DDRSetting100MHz; - RegCount = (sizeof(asT3B_DDRSetting100MHz) / - sizeof(struct bcm_ddr_setting)); - break; - case DDR_133_MHZ: - - /* 266Mhz PLL selected. */ - if (Adapter->bDPLLConfig == PLL_266_MHZ) { - memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ, - sizeof(asDPLL_266MHZ)); - psDDRSetting = asT3B_DDRSetting133MHz; - RegCount = (sizeof(asT3B_DDRSetting133MHz) / - sizeof(struct bcm_ddr_setting)); - } else { - psDDRSetting = asT3B_DDRSetting133MHz; - RegCount = (sizeof(asT3B_DDRSetting133MHz) / - sizeof(struct bcm_ddr_setting)); - if (Adapter->bMipsConfig == MIPS_200_MHZ) - uiClockSetting = 0x07F13652; - else - uiClockSetting = 0x07F1365B; - } - break; - default: - return -EINVAL; - } - break; - - } - default: - return -EINVAL; - } - - value = 0; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, - "Register Count is =%lu\n", RegCount); - while (RegCount && !retval) { - if (uiClockSetting - && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG) - value = uiClockSetting; - else - value = psDDRSetting->ulRegValue; - retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, - sizeof(value)); - if (STATUS_SUCCESS != retval) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "%s:%d\n", __func__, __LINE__); - break; - } - - RegCount--; - psDDRSetting++; - } - - if (Adapter->chip_id >= 0xbece3300) { - - mdelay(3); - if ((Adapter->chip_id != BCS220_2) && - (Adapter->chip_id != BCS220_2BC) && - (Adapter->chip_id != BCS220_3)) { - /* drive MDDR to half in case of UMA-B: */ - uiResetValue = 0x01010001; - retval = wrmalt(Adapter, (UINT)0x0F007018, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - uiResetValue = 0x00040020; - retval = wrmalt(Adapter, (UINT)0x0F007094, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - uiResetValue = 0x01020101; - retval = wrmalt(Adapter, (UINT)0x0F00701c, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - uiResetValue = 0x01010000; - retval = wrmalt(Adapter, (UINT)0x0F007018, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - } - mdelay(3); - - /* DC/DC standby change... - * This is to be done only for Hybrid PMU mode. - * with the current h/w there is no way to detect this. - * and since we dont have internal PMU lets do it under - * UMA-B chip id. we will change this when we will have - * internal PMU. 
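ddr_init() above first selects a table from the chip id and the configured DDR frequency, then writes every entry with wrmalt(); when a MIPS-frequency-dependent clock word was chosen, that word replaces the table value for the entry whose address equals MIPS_CLOCK_REG (0x0f000820). A user-space model of that programming loop, with the register write stubbed out:

/* Model of the ddr_init() register-programming loop above.  write_reg()
 * stands in for wrmalt(); a return of 0 plays the role of STATUS_SUCCESS. */
#include <stddef.h>
#include <stdio.h>

#define MIPS_CLOCK_REG 0x0f000820

struct ddr_setting {
    unsigned long reg_address;
    unsigned long reg_value;
};

static int write_reg(unsigned long addr, unsigned long value)
{
    printf("wrmalt 0x%08lx <- 0x%08lx\n", addr, value);
    return 0;
}

static int program_table(const struct ddr_setting *tbl, size_t count,
                         unsigned long clock_override)
{
    int ret = 0;

    while (count && !ret) {
        unsigned long value;

        /* the MIPS clock entry is replaced when an override was chosen */
        if (clock_override && tbl->reg_address == MIPS_CLOCK_REG)
            value = clock_override;
        else
            value = tbl->reg_value;

        ret = write_reg(tbl->reg_address, value);
        count--;
        tbl++;
    }
    return ret;
}

int main(void)
{
    const struct ddr_setting tbl[] = {
        { 0x0f000810, 0x00000F95 },
        { MIPS_CLOCK_REG, 0x07F13FFF },   /* overridden below */
        { 0x0F007000, 0x00010001 },
    };

    /* 0x03F1365B is one of the 133 MHz clock words used above */
    return program_table(tbl, 3, 0x03F1365B);
}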
- */ - if (Adapter->PmuMode == HYBRID_MODE_7C) { - retval = rdmalt(Adapter, (UINT)0x0f000c00, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - retval = rdmalt(Adapter, (UINT)0x0f000c00, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - uiResetValue = 0x1322a8; - retval = wrmalt(Adapter, (UINT)0x0f000d1c, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - retval = rdmalt(Adapter, (UINT)0x0f000c00, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - retval = rdmalt(Adapter, (UINT)0x0f000c00, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - uiResetValue = 0x132296; - retval = wrmalt(Adapter, (UINT)0x0f000d14, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - } else if (Adapter->PmuMode == HYBRID_MODE_6) { - - retval = rdmalt(Adapter, (UINT)0x0f000c00, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - retval = rdmalt(Adapter, (UINT)0x0f000c00, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - uiResetValue = 0x6003229a; - retval = wrmalt(Adapter, (UINT)0x0f000d14, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - retval = rdmalt(Adapter, (UINT)0x0f000c00, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - retval = rdmalt(Adapter, (UINT)0x0f000c00, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - uiResetValue = 0x1322a8; - retval = wrmalt(Adapter, (UINT)0x0f000d1c, - &uiResetValue, sizeof(uiResetValue)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, - DBG_LVL_ALL, - "%s:%d RDM failed\n", - __func__, - __LINE__); - return retval; - } - } - - } - Adapter->bDDRInitDone = TRUE; - return retval; -} - -int download_ddr_settings(struct bcm_mini_adapter *Adapter) -{ - struct bcm_ddr_setting *psDDRSetting = NULL; - ULONG RegCount = 0; - unsigned long ul_ddr_setting_load_addr = - DDR_DUMP_INTERNAL_DEVICE_MEMORY; - UINT value = 0; - int retval = STATUS_SUCCESS; - bool bOverrideSelfRefresh = false; - - switch (Adapter->chip_id) { - case 0xbece3200: - switch (Adapter->DDRSetting) { - case DDR_80_MHZ: - psDDRSetting = asT3LP_DDRSetting80MHz; - RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz); - RegCount -= 
T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - break; - case DDR_100_MHZ: - psDDRSetting = asT3LP_DDRSetting100MHz; - RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz); - RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - break; - case DDR_133_MHZ: - bOverrideSelfRefresh = TRUE; - psDDRSetting = asT3LP_DDRSetting133MHz; - RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz); - RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - break; - default: - return -EINVAL; - } - break; - - case T3LPB: - case BCS220_2: - case BCS220_2BC: - case BCS250_BC: - case BCS220_3: - switch (Adapter->DDRSetting) { - case DDR_80_MHZ: - psDDRSetting = asT3LPB_DDRSetting80MHz; - RegCount = ARRAY_SIZE(asT3LPB_DDRSetting80MHz); - RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - break; - case DDR_100_MHZ: - psDDRSetting = asT3LPB_DDRSetting100MHz; - RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz); - RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - break; - case DDR_133_MHZ: - bOverrideSelfRefresh = TRUE; - psDDRSetting = asT3LPB_DDRSetting133MHz; - RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz); - RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - break; - - case DDR_160_MHZ: - bOverrideSelfRefresh = TRUE; - psDDRSetting = asT3LPB_DDRSetting160MHz; - RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz); - RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ; - psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ; - - break; - default: - return -EINVAL; - } - break; - case 0xbece0300: - switch (Adapter->DDRSetting) { - case DDR_80_MHZ: - psDDRSetting = asT3_DDRSetting80MHz; - RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz); - RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - break; - case DDR_100_MHZ: - psDDRSetting = asT3_DDRSetting100MHz; - RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz); - RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - break; - case DDR_133_MHZ: - psDDRSetting = asT3_DDRSetting133MHz; - RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz); - RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - break; - default: - return -EINVAL; - } - break; - case 0xbece0310: - { - switch (Adapter->DDRSetting) { - case DDR_80_MHZ: - psDDRSetting = asT3B_DDRSetting80MHz; - RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz); - RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ; - break; - case DDR_100_MHZ: - psDDRSetting = asT3B_DDRSetting100MHz; - RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz); - RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ; - break; - case DDR_133_MHZ: - bOverrideSelfRefresh = TRUE; - psDDRSetting = asT3B_DDRSetting133MHz; - RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz); - RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ; - break; - } - break; - } - default: - return -EINVAL; - } - /* total number of Register that has to be dumped */ - value = RegCount; - retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, - sizeof(value)); - if (retval) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "%s:%d\n", 
__func__, __LINE__); - - return retval; - } - ul_ddr_setting_load_addr += sizeof(ULONG); - /* signature */ - value = (0x1d1e0dd0); - retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, - sizeof(value)); - if (retval) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "%s:%d\n", __func__, __LINE__); - return retval; - } - - ul_ddr_setting_load_addr += sizeof(ULONG); - RegCount *= (sizeof(struct bcm_ddr_setting)/sizeof(ULONG)); - - while (RegCount && !retval) { - value = psDDRSetting->ulRegAddress; - retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, - sizeof(value)); - ul_ddr_setting_load_addr += sizeof(ULONG); - if (!retval) { - if (bOverrideSelfRefresh - && (psDDRSetting->ulRegAddress - == 0x0F007018)) - value = (psDDRSetting->ulRegValue | (1<<8)); - else - value = psDDRSetting->ulRegValue; - - if (STATUS_SUCCESS != wrmalt(Adapter, - ul_ddr_setting_load_addr, - &value, - sizeof(value))) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "%s:%d\n", __func__, __LINE__); - break; - } - } - ul_ddr_setting_load_addr += sizeof(ULONG); - RegCount--; - psDDRSetting++; - } - return retval; -} diff --git a/drivers/staging/bcm/DDRInit.h b/drivers/staging/bcm/DDRInit.h deleted file mode 100644 index b0196fce9255..000000000000 --- a/drivers/staging/bcm/DDRInit.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _DDR_INIT_H_ -#define _DDR_INIT_H_ - - - -int ddr_init(struct bcm_mini_adapter *psAdapter); -int download_ddr_settings(struct bcm_mini_adapter *psAdapter); - -#endif diff --git a/drivers/staging/bcm/Debug.h b/drivers/staging/bcm/Debug.h deleted file mode 100644 index 7b331215c1ac..000000000000 --- a/drivers/staging/bcm/Debug.h +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Debug.h - * - * Dynamic (runtime) debug framework implementation. - * -kaiwan. - */ -#ifndef _DEBUG_H -#define _DEBUG_H -#include -#define NONE 0xFFFF - -/* TYPE and SUBTYPE - * Define valid TYPE (or category or code-path, however you like to think of it) - * and SUBTYPE s. - * Type and SubType are treated as bitmasks. 
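download_ddr_settings() above writes the selected table into device memory at DDR_DUMP_INTERNAL_DEVICE_MEMORY as a simple image: the register count, a 0x1d1e0dd0 signature word, and then the address/value pairs, with bit 8 forced on in the value for register 0x0F007018 when the self-refresh override is in effect. A stand-alone sketch that builds the same image in a local buffer instead of issuing wrmalt() calls:

/* Sketch of the image download_ddr_settings() lays out: count, signature,
 * then address/value pairs.  A local array stands in for the device
 * memory that is written word-by-word with wrmalt(). */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DDR_DUMP_SIGNATURE 0x1d1e0dd0u

struct ddr_setting {
    uint32_t reg_address;
    uint32_t reg_value;
};

static size_t build_dump(uint32_t *out, const struct ddr_setting *tbl,
                         size_t count, bool override_self_refresh)
{
    size_t pos = 0;

    out[pos++] = (uint32_t)count;       /* number of registers to program */
    out[pos++] = DDR_DUMP_SIGNATURE;    /* signature word written next    */

    for (size_t i = 0; i < count; i++) {
        uint32_t value = tbl[i].reg_value;

        if (override_self_refresh && tbl[i].reg_address == 0x0F007018)
            value |= 1u << 8;           /* self-refresh override bit      */

        out[pos++] = tbl[i].reg_address;
        out[pos++] = value;
    }
    return pos;                         /* words written */
}

int main(void)
{
    const struct ddr_setting tbl[] = {
        { 0x0F007000, 0x00010001 },
        { 0x0F007018, 0x01010000 },
    };
    uint32_t image[2 + 2 * 2];
    size_t words = build_dump(image, tbl, 2, true);

    for (size_t i = 0; i < words; i++)
        printf("word %zu: 0x%08" PRIx32 "\n", i, image[i]);
    return 0;
}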
- */ -#define DBG_TYPE_INITEXIT (1 << 0) /* 1 */ -#define DBG_TYPE_TX (1 << 1) /* 2 */ -#define DBG_TYPE_RX (1 << 2) /* 4 */ -#define DBG_TYPE_OTHERS (1 << 3) /* 8 */ -#define NUMTYPES 4 - -/* -SUBTYPEs for TX : TYPE is DBG_TYPE_TX -----// - * Transmit.c ,Arp.c, LeakyBucket.c, And Qos.c - * total 17 macros - */ -/* Transmit.c */ -#define TX 1 -#define MP_SEND (TX << 0) -#define NEXT_SEND (TX << 1) -#define TX_FIFO (TX << 2) -#define TX_CONTROL (TX << 3) - -/* Arp.c */ -#define IP_ADDR (TX << 4) -#define ARP_REQ (TX << 5) -#define ARP_RESP (TX << 6) - -/* Leakybucket.c */ -#define TOKEN_COUNTS (TX << 8) -#define CHECK_TOKENS (TX << 9) -#define TX_PACKETS (TX << 10) -#define TIMER (TX << 11) - -/* Qos.c */ -#define QOS TX -#define QUEUE_INDEX (QOS << 12) -#define IPV4_DBG (QOS << 13) -#define IPV6_DBG (QOS << 14) -#define PRUNE_QUEUE (QOS << 15) -#define SEND_QUEUE (QOS << 16) - -/* TX_Misc */ -#define TX_OSAL_DBG (TX << 17) - -/* --SUBTYPEs for ------INIT & EXIT--------------------- - * ------------ TYPE is DBG_TYPE_INITEXIT -----// - * DriverEntry.c, bcmfwup.c, ChipDetectTask.c, HaltnReset.c, InterfaceDDR.c - */ -#define MP 1 -#define DRV_ENTRY (MP << 0) -#define MP_INIT (MP << 1) -#define READ_REG (MP << 3) -#define DISPATCH (MP << 2) -#define CLAIM_ADAP (MP << 4) -#define REG_IO_PORT (MP << 5) -#define INIT_DISP (MP << 6) -#define RX_INIT (MP << 7) - -/* -SUBTYPEs for --RX---------------------------------- - * ------------RX : TYPE is DBG_TYPE_RX -----// - * Receive.c - */ -#define RX 1 -#define RX_DPC (RX << 0) -#define RX_CTRL (RX << 3) -#define RX_DATA (RX << 4) -#define MP_RETURN (RX << 1) -#define LINK_MSG (RX << 2) - -/* -SUBTYPEs for ----OTHER ROUTINES------------------ - * ------------OTHERS : TYPE is DBG_TYPE_OTHER -----// - * HaltnReset,CheckForHang,PnP,Misc,CmHost - * total 12 macros - */ -#define OTHERS 1 -#define ISR OTHERS -#define MP_DPC (ISR << 0) - -/* HaltnReset.c */ -#define HALT OTHERS -#define MP_HALT (HALT << 1) -#define CHECK_HANG (HALT << 2) -#define MP_RESET (HALT << 3) -#define MP_SHUTDOWN (HALT << 4) - -/* pnp.c */ -#define PNP OTHERS -#define MP_PNP (PNP << 5) - -/* Misc.c */ -#define MISC OTHERS -#define DUMP_INFO (MISC << 6) -#define CLASSIFY (MISC << 7) -#define LINK_UP_MSG (MISC << 8) -#define CP_CTRL_PKT (MISC << 9) -#define DUMP_CONTROL (MISC << 10) -#define LED_DUMP_INFO (MISC << 11) - -/* CmHost.c */ -#define CMHOST OTHERS -#define SERIAL (OTHERS << 12) -#define IDLE_MODE (OTHERS << 13) -#define WRM (OTHERS << 14) -#define RDM (OTHERS << 15) - -/* TODO - put PHS_SEND in Tx PHS_RECEIVE in Rx path ? */ -#define PHS_SEND (OTHERS << 16) -#define PHS_RECEIVE (OTHERS << 17) -#define PHS_MODULE (OTHERS << 18) - -#define INTF_INIT (OTHERS << 19) -#define INTF_ERR (OTHERS << 20) -#define INTF_WARN (OTHERS << 21) -#define INTF_NORM (OTHERS << 22) - -#define IRP_COMPLETION (OTHERS << 23) -#define SF_DESCRIPTOR_CNTS (OTHERS << 24) -#define PHS_DISPATCH (OTHERS << 25) -#define OSAL_DBG (OTHERS << 26) -#define NVM_RW (OTHERS << 27) - -#define HOST_MIBS (OTHERS << 28) -#define CONN_MSG (CMHOST << 29) - -/* Debug level - * We have 8 debug levels, in (numerical) increasing order of verbosity. - * IMP: Currently implementing ONLY DBG_LVL_ALL , i.e. , all debug prints will - * appear (of course, iff global debug flag is ON and we match the Type and SubType). - * Finer granularity debug levels are currently not in use, although the feature exists. - * - * Another way to say this: - * All the debug prints currently have 'debug_level' set to DBG_LVL_ALL . 
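The Type and SubType bits defined above feed the BCM_DEBUG_PRINT macro a little further down in this header: a message is emitted only when its level passes the adapter's debug_level and both its Type bit and the corresponding per-Type SubType bit are enabled, while DBG_TYPE_PRINTK bypasses the filter entirely. A user-space model of that check, mirroring the defines in this header:

/* Model of the BCM_DEBUG_PRINT filter defined below: type bit, per-type
 * subtype bit and level must all admit the message; DBG_TYPE_PRINTK (3)
 * skips the filter. */
#include <stdbool.h>
#include <stdio.h>

#define DBG_TYPE_OTHERS  (1 << 3)
#define DBG_TYPE_PRINTK  3
#define DBG_LVL_BITMASK  0xFF
#define NUMTYPES         4
#define CONN_MSG         (1u << 29)   /* (CMHOST << 29) with CMHOST == 1 */
#define BCM_ALL          7

struct debug_state {
    unsigned int type;                          /* enabled Type bits         */
    unsigned int subtype[(NUMTYPES * 2) + 1];   /* indexed by the Type value */
    unsigned int debug_level;
};

static bool debug_allowed(const struct debug_state *st, unsigned int type,
                          unsigned int subtype, unsigned int dbg_level)
{
    if (type == DBG_TYPE_PRINTK)
        return true;                            /* direct printk path */

    return (dbg_level & DBG_LVL_BITMASK) <= st->debug_level &&
           (type & st->type) &&
           (subtype & st->subtype[type]);
}

int main(void)
{
    struct debug_state st = { .type = DBG_TYPE_OTHERS, .debug_level = BCM_ALL };

    st.subtype[DBG_TYPE_OTHERS] = CONN_MSG;     /* enable one subtype bit */

    printf("allowed: %d\n",
           debug_allowed(&st, DBG_TYPE_OTHERS, CONN_MSG, BCM_ALL));
    return 0;
}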
- * You can compile-time change that to any of the below, if you wish to. However, as of now, there's - * no dynamic facility to have the userspace 'TestApp' set debug_level. Slated for future expansion. - */ -#define BCM_ALL 7 -#define BCM_LOW 6 -#define BCM_PRINT 5 -#define BCM_NORMAL 4 -#define BCM_MEDIUM 3 -#define BCM_SCREAM 2 -#define BCM_ERR 1 -/* Not meant for developer in debug prints. - * To be used to disable all prints by setting the DBG_LVL_CURR to this value - */ -#define BCM_NONE 0 - -/* The current driver logging level. - * Everything at this level and (numerically) lower (meaning higher prio) - * is logged. - * Replace 'BCM_ALL' in the DBG_LVL_CURR macro with the logging level desired. - * For eg. to set the logging level to 'errors only' use: - * #define DBG_LVL_CURR (BCM_ERR) - */ - -#define DBG_LVL_CURR (BCM_ALL) -#define DBG_LVL_ALL BCM_ALL - -/* ---Userspace mapping of Debug State. - * Delibrately matches that of the Windows driver.. - * The TestApp's ioctl passes this struct to us. - */ -struct bcm_user_debug_state { - unsigned int Subtype, Type; - unsigned int OnOff; -/* unsigned int debug_level; future expansion */ -} __packed; - -/* ---Kernel-space mapping of Debug State */ -struct bcm_debug_state { - unsigned int type; - /* A bitmap of 32 bits for Subtype per Type. - * Valid indexes in 'subtype' array are *only* 1,2,4 and 8, - * corresponding to valid Type values. Hence we use the 'Type' field - * as the index value, ignoring the array entries 0,3,5,6,7 ! - */ - unsigned int subtype[(NUMTYPES*2)+1]; - unsigned int debug_level; -}; -/* Instantiated in the Adapter structure - * We'll reuse the debug level parameter to include a bit (the MSB) to indicate whether or not - * we want the function's name printed. - */ -#define DBG_NO_FUNC_PRINT (1 << 31) -#define DBG_LVL_BITMASK 0xFF - -/* --- Only for direct printk's; "hidden" to API. */ -#define DBG_TYPE_PRINTK 3 - -#define BCM_DEBUG_PRINT(Adapter, Type, SubType, dbg_level, string, args...) \ - do { \ - if (DBG_TYPE_PRINTK == Type) \ - pr_info("%s:" string, __func__, ##args); \ - else if (Adapter && \ - (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \ - (Type & Adapter->stDebugState.type) && \ - (SubType & Adapter->stDebugState.subtype[Type])) { \ - if (dbg_level & DBG_NO_FUNC_PRINT) \ - pr_debug("%s:\n", string); \ - else \ - pr_debug("%s:\n" string, __func__, ##args); \ - } \ - } while (0) - -#define BCM_DEBUG_PRINT_BUFFER(Adapter, Type, SubType, dbg_level, buffer, bufferlen) \ - do { \ - if (DBG_TYPE_PRINTK == Type || \ - (Adapter && \ - (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \ - (Type & Adapter->stDebugState.type) && \ - (SubType & Adapter->stDebugState.subtype[Type]))) { \ - pr_debug("%s:\n", __func__); \ - print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, \ - 16, 1, buffer, bufferlen, false); \ - } \ - } while (0) - -#define BCM_SHOW_DEBUG_BITMAP(Adapter) do { \ - int i; \ - for (i = 0; i < (NUMTYPES * 2) + 1; i++) { \ - if ((i == 1) || (i == 2) || (i == 4) || (i == 8)) { \ - /* CAUTION! Forcefully turn on ALL debug paths and subpaths! 
\ - * Adapter->stDebugState.subtype[i] = 0xffffffff; \ - */ \ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "subtype[%d] = 0x%08x\n", \ - i, Adapter->stDebugState.subtype[i]); \ - } \ - } \ -} while (0) - -#endif diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c deleted file mode 100644 index dd5d138a6528..000000000000 --- a/drivers/staging/bcm/HandleControlPacket.c +++ /dev/null @@ -1,241 +0,0 @@ -/** - * @file HandleControlPacket.c - * This file contains the routines to deal with - * sending and receiving of control packets. - */ -#include "headers.h" - -/** - * When a control packet is received, analyze the - * "status" and call appropriate response function. - * Enqueue the control packet for Application. - * @return None - */ -static VOID handle_rx_control_packet(struct bcm_mini_adapter *Adapter, - struct sk_buff *skb) -{ - struct bcm_tarang_data *pTarang = NULL; - bool HighPriorityMessage = false; - struct sk_buff *newPacket = NULL; - CHAR cntrl_msg_mask_bit = 0; - bool drop_pkt_flag = TRUE; - USHORT usStatus = *(PUSHORT)(skb->data); - - if (netif_msg_pktdata(Adapter)) - print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE, - 16, 1, skb->data, skb->len, 0); - - switch (usStatus) { - case CM_RESPONSES: /* 0xA0 */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, - DBG_LVL_ALL, - "MAC Version Seems to be Non Multi-Classifier, rejected by Driver"); - HighPriorityMessage = TRUE; - break; - case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP: - HighPriorityMessage = TRUE; - if (Adapter->LinkStatus == LINKUP_DONE) - CmControlResponseMessage(Adapter, - (skb->data + sizeof(USHORT))); - break; - case LINK_CONTROL_RESP: /* 0xA2 */ - case STATUS_RSP: /* 0xA1 */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, - DBG_LVL_ALL, "LINK_CONTROL_RESP"); - HighPriorityMessage = TRUE; - LinkControlResponseMessage(Adapter, - (skb->data + sizeof(USHORT))); - break; - case STATS_POINTER_RESP: /* 0xA6 */ - HighPriorityMessage = TRUE; - StatisticsResponse(Adapter, (skb->data + sizeof(USHORT))); - break; - case IDLE_MODE_STATUS: /* 0xA3 */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, - DBG_LVL_ALL, - "IDLE_MODE_STATUS Type Message Got from F/W"); - InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data + - sizeof(USHORT))); - HighPriorityMessage = TRUE; - break; - - case AUTH_SS_HOST_MSG: - HighPriorityMessage = TRUE; - break; - - default: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, - DBG_LVL_ALL, "Got Default Response"); - /* Let the Application Deal with This Packet */ - break; - } - - /* Queue The Control Packet to The Application Queues */ - down(&Adapter->RxAppControlQueuelock); - - for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) { - if (Adapter->device_removed) - break; - - drop_pkt_flag = TRUE; - /* - * There are cntrl msg from A0 to AC. It has been mapped to 0 to - * C bit in the cntrl mask. - * Also, by default AD to BF has been masked to the rest of the - * bits... which wil be ON by default. - * if mask bit is enable to particular pkt status, send it out - * to app else stop it. 
- */ - cntrl_msg_mask_bit = (usStatus & 0x1F); - /* - * printk("\ninew msg mask bit which is disable in mask:%X", - * cntrl_msg_mask_bit); - */ - if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit)) - drop_pkt_flag = false; - - if ((drop_pkt_flag == TRUE) || - (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN) - || ((pTarang->AppCtrlQueueLen > - MAX_APP_QUEUE_LEN / 2) && - (HighPriorityMessage == false))) { - /* - * Assumption:- - * 1. every tarang manages it own dropped pkt - * statitistics - * 2. Total packet dropped per tarang will be equal to - * the sum of all types of dropped pkt by that - * tarang only. - */ - struct bcm_mibs_dropped_cntrl_msg *msg = - &pTarang->stDroppedAppCntrlMsgs; - switch (*(PUSHORT)skb->data) { - case CM_RESPONSES: - msg->cm_responses++; - break; - case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP: - msg->cm_control_newdsx_multiclassifier_resp++; - break; - case LINK_CONTROL_RESP: - msg->link_control_resp++; - break; - case STATUS_RSP: - msg->status_rsp++; - break; - case STATS_POINTER_RESP: - msg->stats_pointer_resp++; - break; - case IDLE_MODE_STATUS: - msg->idle_mode_status++; - break; - case AUTH_SS_HOST_MSG: - msg->auth_ss_host_msg++; - break; - default: - msg->low_priority_message++; - break; - } - - continue; - } - - newPacket = skb_clone(skb, GFP_KERNEL); - if (!newPacket) - break; - ENQUEUEPACKET(pTarang->RxAppControlHead, - pTarang->RxAppControlTail, newPacket); - pTarang->AppCtrlQueueLen++; - } - up(&Adapter->RxAppControlQueuelock); - wake_up(&Adapter->process_read_wait_queue); - dev_kfree_skb(skb); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, - "After wake_up_interruptible"); -} - -/** - * @ingroup ctrl_pkt_functions - * Thread to handle control pkt reception - */ - -/* pointer to adapter object*/ -int control_packet_handler(struct bcm_mini_adapter *Adapter) -{ - struct sk_buff *ctrl_packet = NULL; - unsigned long flags = 0; - /* struct timeval tv; */ - /* int *puiBuffer = NULL; */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, - "Entering to make thread wait on control packet event!"); - while (1) { - wait_event_interruptible(Adapter->process_rx_cntrlpkt, - atomic_read(&Adapter->cntrlpktCnt) || - Adapter->bWakeUpDevice || - kthread_should_stop()); - - - if (kthread_should_stop()) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, - DBG_LVL_ALL, "Exiting\n"); - return 0; - } - if (TRUE == Adapter->bWakeUpDevice) { - Adapter->bWakeUpDevice = false; - if ((false == Adapter->bTriedToWakeUpFromlowPowerMode) - && ((TRUE == Adapter->IdleMode) || - (TRUE == Adapter->bShutStatus))) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - CP_CTRL_PKT, DBG_LVL_ALL, - "Calling InterfaceAbortIdlemode\n"); - /* - * Adapter->bTriedToWakeUpFromlowPowerMode - * = TRUE; - */ - InterfaceIdleModeWakeup(Adapter); - } - continue; - } - - while (atomic_read(&Adapter->cntrlpktCnt)) { - spin_lock_irqsave(&Adapter->control_queue_lock, flags); - ctrl_packet = Adapter->RxControlHead; - if (ctrl_packet) { - DEQUEUEPACKET(Adapter->RxControlHead, - Adapter->RxControlTail); - /* Adapter->RxControlHead=ctrl_packet->next; */ - } - - spin_unlock_irqrestore(&Adapter->control_queue_lock, - flags); - handle_rx_control_packet(Adapter, ctrl_packet); - atomic_dec(&Adapter->cntrlpktCnt); - } - - SetUpTargetDsxBuffers(Adapter); - } - return STATUS_SUCCESS; -} - -INT flushAllAppQ(void) -{ - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_tarang_data *pTarang = NULL; - struct sk_buff *PacketToDrop = NULL; - - for 
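handle_rx_control_packet() above queues a control message to an application only if that application opted in: the low five bits of the status code select a bit in RxCntrlMsgBitMask, and even an enabled message is dropped when the application's control queue is over its limit, or over half the limit for a low-priority message. A stand-alone sketch of that drop decision; MAX_APP_QUEUE_LEN here is a placeholder value, the real constant is defined elsewhere in the driver:

/* Sketch of the per-application control-packet drop decision in
 * handle_rx_control_packet().  MAX_APP_QUEUE_LEN is a placeholder. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_APP_QUEUE_LEN 200   /* placeholder */

static bool should_drop(unsigned short status, unsigned int ctrl_msg_bitmask,
                        unsigned int queue_len, bool high_priority)
{
    unsigned int mask_bit = status & 0x1F;   /* 0xA0..0xBF -> bits 0..31 */

    /* dropped unless the application enabled this message type */
    if (!(ctrl_msg_bitmask & (1u << mask_bit)))
        return true;

    /* queue over its limit, or half full and the message is low priority */
    if (queue_len > MAX_APP_QUEUE_LEN)
        return true;
    if (queue_len > MAX_APP_QUEUE_LEN / 2 && !high_priority)
        return true;

    return false;
}

int main(void)
{
    /* status 0xA1 (STATUS_RSP) -> mask bit 1 */
    printf("drop: %d\n", should_drop(0xA1, 1u << 1, 10, true));
    printf("drop: %d\n", should_drop(0xA1, 0, 10, true));
    return 0;
}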
(pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) { - while (pTarang->RxAppControlHead != NULL) { - PacketToDrop = pTarang->RxAppControlHead; - DEQUEUEPACKET(pTarang->RxAppControlHead, - pTarang->RxAppControlTail); - dev_kfree_skb(PacketToDrop); - } - pTarang->AppCtrlQueueLen = 0; - /* dropped contrl packet statistics also should be reset. */ - memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0, - sizeof(struct bcm_mibs_dropped_cntrl_msg)); - - } - return STATUS_SUCCESS; -} - - diff --git a/drivers/staging/bcm/HostMIBSInterface.h b/drivers/staging/bcm/HostMIBSInterface.h deleted file mode 100644 index f922ac49b70e..000000000000 --- a/drivers/staging/bcm/HostMIBSInterface.h +++ /dev/null @@ -1,192 +0,0 @@ -#ifndef _HOST_MIBSINTERFACE_H -#define _HOST_MIBSINTERFACE_H - -/* - * Copyright (c) 2007 Beceem Communications Pvt. Ltd - * File Name: HostMIBSInterface.h - * Abstract: This file contains DS used by the Host to update the Host - * statistics used for the MIBS. - */ - -#define MIBS_MAX_CLASSIFIERS 100 -#define MIBS_MAX_PHSRULES 100 -#define MIBS_MAX_SERVICEFLOWS 17 -#define MIBS_MAX_IP_RANGE_LENGTH 4 -#define MIBS_MAX_PORT_RANGE 4 -#define MIBS_MAX_PROTOCOL_LENGTH 32 -#define MIBS_MAX_PHS_LENGTHS 255 -#define MIBS_IPV6_ADDRESS_SIZEINBYTES 0x10 -#define MIBS_IP_LENGTH_OF_ADDRESS 4 -#define MIBS_MAX_HIST_ENTRIES 12 -#define MIBS_PKTSIZEHIST_RANGE 128 - -union bcm_mibs_ip_addr { - struct { - /* Source Ip Address Range */ - unsigned long ulIpv4Addr[MIBS_MAX_IP_RANGE_LENGTH]; - /* Source Ip Mask Address Range */ - unsigned long ulIpv4Mask[MIBS_MAX_IP_RANGE_LENGTH]; - }; - struct { - /* Source Ip Address Range */ - unsigned long ulIpv6Addr[MIBS_MAX_IP_RANGE_LENGTH * 4]; - /* Source Ip Mask Address Range */ - unsigned long ulIpv6Mask[MIBS_MAX_IP_RANGE_LENGTH * 4]; - }; - struct { - unsigned char ucIpv4Address[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IP_LENGTH_OF_ADDRESS]; - unsigned char ucIpv4Mask[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IP_LENGTH_OF_ADDRESS]; - }; - struct { - unsigned char ucIpv6Address[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IPV6_ADDRESS_SIZEINBYTES]; - unsigned char ucIpv6Mask[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IPV6_ADDRESS_SIZEINBYTES]; - }; -}; - -struct bcm_mibs_host_info { - u64 GoodTransmits; - u64 GoodReceives; - /* this to keep track of the Tx and Rx MailBox Registers. 
*/ - unsigned long NumDesUsed; - unsigned long CurrNumFreeDesc; - unsigned long PrevNumFreeDesc; - /* to keep track the no of byte received */ - unsigned long PrevNumRcevBytes; - unsigned long CurrNumRcevBytes; - /* QOS Related */ - unsigned long BEBucketSize; - unsigned long rtPSBucketSize; - unsigned long LastTxQueueIndex; - bool TxOutofDescriptors; - bool TimerActive; - u32 u32TotalDSD; - u32 aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES]; - u32 aRxPktSizeHist[MIBS_MAX_HIST_ENTRIES]; -}; - -struct bcm_mibs_classifier_rule { - unsigned long ulSFID; - unsigned char ucReserved[2]; - u16 uiClassifierRuleIndex; - bool bUsed; - unsigned short usVCID_Value; - u8 u8ClassifierRulePriority; - union bcm_mibs_ip_addr stSrcIpAddress; - /* IP Source Address Length */ - unsigned char ucIPSourceAddressLength; - union bcm_mibs_ip_addr stDestIpAddress; - /* IP Destination Address Length */ - unsigned char ucIPDestinationAddressLength; - unsigned char ucIPTypeOfServiceLength; - unsigned char ucTosLow; - unsigned char ucTosHigh; - unsigned char ucTosMask; - unsigned char ucProtocolLength; - unsigned char ucProtocol[MIBS_MAX_PROTOCOL_LENGTH]; - unsigned short usSrcPortRangeLo[MIBS_MAX_PORT_RANGE]; - unsigned short usSrcPortRangeHi[MIBS_MAX_PORT_RANGE]; - unsigned char ucSrcPortRangeLength; - unsigned short usDestPortRangeLo[MIBS_MAX_PORT_RANGE]; - unsigned short usDestPortRangeHi[MIBS_MAX_PORT_RANGE]; - unsigned char ucDestPortRangeLength; - bool bProtocolValid; - bool bTOSValid; - bool bDestIpValid; - bool bSrcIpValid; - unsigned char ucDirection; - bool bIpv6Protocol; - u32 u32PHSRuleID; -}; - -struct bcm_mibs_phs_rule { - unsigned long ulSFID; - u8 u8PHSI; - u8 u8PHSFLength; - u8 u8PHSF[MIBS_MAX_PHS_LENGTHS]; - u8 u8PHSMLength; - u8 u8PHSM[MIBS_MAX_PHS_LENGTHS]; - u8 u8PHSS; - u8 u8PHSV; - u8 reserved[5]; - long PHSModifiedBytes; - unsigned long PHSModifiedNumPackets; - unsigned long PHSErrorNumPackets; -}; - -struct bcm_mibs_parameters { - u32 wmanIfSfid; - u32 wmanIfCmnCpsSfState; - u32 wmanIfCmnCpsMaxSustainedRate; - u32 wmanIfCmnCpsMaxTrafficBurst; - u32 wmanIfCmnCpsMinReservedRate; - u32 wmanIfCmnCpsToleratedJitter; - u32 wmanIfCmnCpsMaxLatency; - u32 wmanIfCmnCpsFixedVsVariableSduInd; - u32 wmanIfCmnCpsSduSize; - u32 wmanIfCmnCpsSfSchedulingType; - u32 wmanIfCmnCpsArqEnable; - u32 wmanIfCmnCpsArqWindowSize; - u32 wmanIfCmnCpsArqBlockLifetime; - u32 wmanIfCmnCpsArqSyncLossTimeout; - u32 wmanIfCmnCpsArqDeliverInOrder; - u32 wmanIfCmnCpsArqRxPurgeTimeout; - u32 wmanIfCmnCpsArqBlockSize; - u32 wmanIfCmnCpsMinRsvdTolerableRate; - u32 wmanIfCmnCpsReqTxPolicy; - u32 wmanIfCmnSfCsSpecification; - u32 wmanIfCmnCpsTargetSaid; -}; - -struct bcm_mibs_table { - unsigned long ulSFID; - unsigned short usVCID_Value; - unsigned int uiThreshold; - u8 u8TrafficPriority; - bool bValid; - bool bActive; - bool bActivateRequestSent; - u8 u8QueueType; - unsigned int uiMaxBucketSize; - unsigned int uiCurrentQueueDepthOnTarget; - unsigned int uiCurrentBytesOnHost; - unsigned int uiCurrentPacketsOnHost; - unsigned int uiDroppedCountBytes; - unsigned int uiDroppedCountPackets; - unsigned int uiSentBytes; - unsigned int uiSentPackets; - unsigned int uiCurrentDrainRate; - unsigned int uiThisPeriodSentBytes; - u64 liDrainCalculated; - unsigned int uiCurrentTokenCount; - u64 liLastUpdateTokenAt; - unsigned int uiMaxAllowedRate; - unsigned int NumOfPacketsSent; - unsigned char ucDirection; - unsigned short usCID; - struct bcm_mibs_parameters stMibsExtServiceFlowTable; - unsigned int uiCurrentRxRate; - unsigned int uiThisPeriodRxBytes; - 
unsigned int uiTotalRxBytes; - unsigned int uiTotalTxBytes; -}; - -struct bcm_mibs_dropped_cntrl_msg { - unsigned long cm_responses; - unsigned long cm_control_newdsx_multiclassifier_resp; - unsigned long link_control_resp; - unsigned long status_rsp; - unsigned long stats_pointer_resp; - unsigned long idle_mode_status; - unsigned long auth_ss_host_msg; - unsigned long low_priority_message; -}; - -struct bcm_host_stats_mibs { - struct bcm_mibs_host_info stHostInfo; - struct bcm_mibs_classifier_rule astClassifierTable[MIBS_MAX_CLASSIFIERS]; - struct bcm_mibs_table astSFtable[MIBS_MAX_SERVICEFLOWS]; - struct bcm_mibs_phs_rule astPhsRulesTable[MIBS_MAX_PHSRULES]; - struct bcm_mibs_dropped_cntrl_msg stDroppedAppCntrlMsgs; -}; - -#endif diff --git a/drivers/staging/bcm/IPv6Protocol.c b/drivers/staging/bcm/IPv6Protocol.c deleted file mode 100644 index 27f3f416f184..000000000000 --- a/drivers/staging/bcm/IPv6Protocol.c +++ /dev/null @@ -1,476 +0,0 @@ -#include "headers.h" - -static bool MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule, - struct bcm_ipv6_hdr *pstIpv6Header); -static bool MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule, - struct bcm_ipv6_hdr *pstIpv6Header); -static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header); - -static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload, - UCHAR *pucNextHeader, bool *bParseDone, USHORT *pusPayloadLength) -{ - UCHAR *pucRetHeaderPtr = NULL; - UCHAR *pucPayloadPtr = NULL; - USHORT usNextHeaderOffset = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if ((ppucPayload == NULL) || (*pusPayloadLength == 0) || - (*bParseDone)) { - *bParseDone = TRUE; - return NULL; - } - - pucRetHeaderPtr = *ppucPayload; - pucPayloadPtr = *ppucPayload; - - if (!pucRetHeaderPtr || !pucPayloadPtr) { - *bParseDone = TRUE; - return NULL; - } - - /* Get the Nextt Header Type */ - *bParseDone = false; - - - switch (*pucNextHeader) { - case IPV6HDR_TYPE_HOPBYHOP: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "\nIPv6 HopByHop Header"); - usNextHeaderOffset += sizeof(struct bcm_ipv6_options_hdr); - break; - - case IPV6HDR_TYPE_ROUTING: - { - struct bcm_ipv6_routing_hdr *pstIpv6RoutingHeader; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "\nIPv6 Routing Header"); - pstIpv6RoutingHeader = - (struct bcm_ipv6_routing_hdr *)pucPayloadPtr; - usNextHeaderOffset += sizeof(struct bcm_ipv6_routing_hdr); - usNextHeaderOffset += pstIpv6RoutingHeader->ucNumAddresses * - IPV6_ADDRESS_SIZEINBYTES; - } - break; - - case IPV6HDR_TYPE_FRAGMENTATION: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "\nIPv6 Fragmentation Header"); - usNextHeaderOffset += sizeof(struct bcm_ipv6_fragment_hdr); - break; - - case IPV6HDR_TYPE_DESTOPTS: - { - struct bcm_ipv6_dest_options_hdr *pstIpv6DestOptsHdr = - (struct bcm_ipv6_dest_options_hdr *)pucPayloadPtr; - int nTotalOptions = pstIpv6DestOptsHdr->ucHdrExtLen; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "\nIPv6 DestOpts Header Header"); - usNextHeaderOffset += sizeof(struct bcm_ipv6_dest_options_hdr); - usNextHeaderOffset += nTotalOptions * - IPV6_DESTOPTS_HDR_OPTIONSIZE; - } - break; - - - case IPV6HDR_TYPE_AUTHENTICATION: - { - struct bcm_ipv6_authentication_hdr *pstIpv6AuthHdr = - (struct bcm_ipv6_authentication_hdr *)pucPayloadPtr; - int nHdrLen = pstIpv6AuthHdr->ucLength; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "\nIPv6 Authentication Header"); - usNextHeaderOffset += nHdrLen * 
4; - } - break; - - case IPV6HDR_TYPE_ENCRYPTEDSECURITYPAYLOAD: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "\nIPv6 Encrypted Security Payload Header"); - *bParseDone = TRUE; - break; - - case IPV6_ICMP_HDR_TYPE: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "\nICMP Header"); - *bParseDone = TRUE; - break; - - case TCP_HEADER_TYPE: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "\nTCP Header"); - *bParseDone = TRUE; - break; - - case UDP_HEADER_TYPE: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "\nUDP Header"); - *bParseDone = TRUE; - break; - - default: - *bParseDone = TRUE; - break; - } - - if (*bParseDone == false) { - if (*pusPayloadLength <= usNextHeaderOffset) { - *bParseDone = TRUE; - } else { - *pucNextHeader = *pucPayloadPtr; - pucPayloadPtr += usNextHeaderOffset; - (*pusPayloadLength) -= usNextHeaderOffset; - } - - } - - *ppucPayload = pucPayloadPtr; - return pucRetHeaderPtr; -} - - -static UCHAR GetIpv6ProtocolPorts(UCHAR *pucPayload, USHORT *pusSrcPort, - USHORT *pusDestPort, USHORT usPayloadLength, UCHAR ucNextHeader) -{ - UCHAR *pIpv6HdrScanContext = pucPayload; - bool bDone = false; - UCHAR ucHeaderType = 0; - UCHAR *pucNextHeader = NULL; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (!pucPayload || (usPayloadLength == 0)) - return 0; - - *pusSrcPort = *pusDestPort = 0; - ucHeaderType = ucNextHeader; - while (!bDone) { - pucNextHeader = GetNextIPV6ChainedHeader(&pIpv6HdrScanContext, - &ucHeaderType, - &bDone, - &usPayloadLength); - if (bDone) { - if ((ucHeaderType == TCP_HEADER_TYPE) || - (ucHeaderType == UDP_HEADER_TYPE)) { - *pusSrcPort = *((PUSHORT)(pucNextHeader)); - *pusDestPort = *((PUSHORT)(pucNextHeader+2)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "\nProtocol Ports - Src Port :0x%x Dest Port : 0x%x", - ntohs(*pusSrcPort), - ntohs(*pusDestPort)); - } - break; - - } - } - return ucHeaderType; -} - - -/* - * Arg 1 struct bcm_mini_adapter *Adapter is a pointer ot the driver control - * structure - * Arg 2 PVOID pcIpHeader is a pointer to the IP header of the packet - */ -USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader, - struct bcm_classifier_rule *pstClassifierRule) -{ - USHORT ushDestPort = 0; - USHORT ushSrcPort = 0; - UCHAR ucNextProtocolAboveIP = 0; - struct bcm_ipv6_hdr *pstIpv6Header = NULL; - bool bClassificationSucceed = false; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "IpVersion6 ==========>\n"); - - pstIpv6Header = pcIpHeader; - - DumpIpv6Header(pstIpv6Header); - - /* - * Try to get the next higher layer protocol - * and the Ports Nos if TCP or UDP - */ - ucNextProtocolAboveIP = GetIpv6ProtocolPorts((UCHAR *)(pcIpHeader + - sizeof(struct bcm_ipv6_hdr)), - &ushSrcPort, - &ushDestPort, - pstIpv6Header->usPayloadLength, - pstIpv6Header->ucNextHeader); - - do { - if (pstClassifierRule->ucDirection == 0) { - /* - * cannot be processed for classification. - * it is a down link connection - */ - break; - } - - if (!pstClassifierRule->bIpv6Protocol) { - /* - * We are looking for Ipv6 Classifiers - * Lets ignore this classifier and try the next one - */ - break; - } - - bClassificationSucceed = MatchSrcIpv6Address(pstClassifierRule, - pstIpv6Header); - if (!bClassificationSucceed) - break; - - bClassificationSucceed = MatchDestIpv6Address(pstClassifierRule, - pstIpv6Header); - if (!bClassificationSucceed) - break; - - /* - * Match the protocol type. 
- * For IPv6 the next protocol at end of - * Chain of IPv6 prot headers - */ - bClassificationSucceed = MatchProtocol(pstClassifierRule, - ucNextProtocolAboveIP); - if (!bClassificationSucceed) - break; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "\nIPv6 Protocol Matched"); - - if ((ucNextProtocolAboveIP == TCP_HEADER_TYPE) || - (ucNextProtocolAboveIP == UDP_HEADER_TYPE)) { - /* Match Src Port */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "\nIPv6 Source Port:%x\n", - ntohs(ushSrcPort)); - bClassificationSucceed = MatchSrcPort(pstClassifierRule, - ntohs(ushSrcPort)); - if (!bClassificationSucceed) - break; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, "\nIPv6 Src Port Matched"); - - /* Match Dest Port */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "\nIPv6 Destination Port:%x\n", - ntohs(ushDestPort)); - bClassificationSucceed = MatchDestPort(pstClassifierRule, - ntohs(ushDestPort)); - if (!bClassificationSucceed) - break; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "\nIPv6 Dest Port Matched"); - } - } while (0); - - if (bClassificationSucceed == TRUE) { - INT iMatchedSFQueueIndex = 0; - - iMatchedSFQueueIndex = SearchSfid(Adapter, - pstClassifierRule->ulSFID); - if ((iMatchedSFQueueIndex >= NO_OF_QUEUES) || - (Adapter->PackInfo[iMatchedSFQueueIndex].bActive == false)) - bClassificationSucceed = false; - } - - return bClassificationSucceed; -} - - -static bool MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule, - struct bcm_ipv6_hdr *pstIpv6Header) -{ - UINT uiLoopIndex = 0; - UINT uiIpv6AddIndex = 0; - UINT uiIpv6AddrNoLongWords = 4; - ULONG aulSrcIP[4]; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - union u_ip_address *src_addr = &pstClassifierRule->stSrcIpAddress; - - /* - * This is the no. 
of Src Addresses ie Range of IP Addresses contained - * in the classifier rule for which we need to match - */ - UINT uiCountIPSrcAddresses = - (UINT)pstClassifierRule->ucIPSourceAddressLength; - - - if (uiCountIPSrcAddresses == 0) - return TRUE; - - - /* First Convert the Ip Address in the packet to Host Endian order */ - for (uiIpv6AddIndex = 0; - uiIpv6AddIndex < uiIpv6AddrNoLongWords; - uiIpv6AddIndex++) - aulSrcIP[uiIpv6AddIndex] = - ntohl(pstIpv6Header->ulSrcIpAddress[uiIpv6AddIndex]); - - for (uiLoopIndex = 0; - uiLoopIndex < uiCountIPSrcAddresses; - uiLoopIndex += uiIpv6AddrNoLongWords) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "\n Src Ipv6 Address In Received Packet :\n "); - DumpIpv6Address(aulSrcIP); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "\n Src Ipv6 Mask In Classifier Rule:\n"); - DumpIpv6Address(&src_addr->ulIpv6Mask[uiLoopIndex]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "\n Src Ipv6 Address In Classifier Rule :\n"); - DumpIpv6Address(&src_addr->ulIpv6Addr[uiLoopIndex]); - - for (uiIpv6AddIndex = 0; - uiIpv6AddIndex < uiIpv6AddrNoLongWords; - uiIpv6AddIndex++) { - if ((src_addr->ulIpv6Mask[uiLoopIndex+uiIpv6AddIndex] & - aulSrcIP[uiIpv6AddIndex]) != - src_addr->ulIpv6Addr[uiLoopIndex+uiIpv6AddIndex]) { - /* - * Match failed for current Ipv6 Address - * Try next Ipv6 Address - */ - break; - } - - if (uiIpv6AddIndex == uiIpv6AddrNoLongWords-1) { - /* Match Found */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "Ipv6 Src Ip Address Matched\n"); - return TRUE; - } - } - } - return false; -} - -static bool MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule, - struct bcm_ipv6_hdr *pstIpv6Header) -{ - UINT uiLoopIndex = 0; - UINT uiIpv6AddIndex = 0; - UINT uiIpv6AddrNoLongWords = 4; - ULONG aulDestIP[4]; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - union u_ip_address *dest_addr = &pstClassifierRule->stDestIpAddress; - - /* - * This is the no. of Destination Addresses - * ie Range of IP Addresses contained in the classifier rule - * for which we need to match - */ - UINT uiCountIPDestinationAddresses = - (UINT)pstClassifierRule->ucIPDestinationAddressLength; - - if (uiCountIPDestinationAddresses == 0) - return TRUE; - - - /* First Convert the Ip Address in the packet to Host Endian order */ - for (uiIpv6AddIndex = 0; - uiIpv6AddIndex < uiIpv6AddrNoLongWords; - uiIpv6AddIndex++) - aulDestIP[uiIpv6AddIndex] = - ntohl(pstIpv6Header->ulDestIpAddress[uiIpv6AddIndex]); - - for (uiLoopIndex = 0; - uiLoopIndex < uiCountIPDestinationAddresses; - uiLoopIndex += uiIpv6AddrNoLongWords) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "\n Destination Ipv6 Address In Received Packet :\n "); - DumpIpv6Address(aulDestIP); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "\n Destination Ipv6 Mask In Classifier Rule :\n"); - DumpIpv6Address(&dest_addr->ulIpv6Mask[uiLoopIndex]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "\n Destination Ipv6 Address In Classifier Rule :\n"); - DumpIpv6Address(&dest_addr->ulIpv6Addr[uiLoopIndex]); - - for (uiIpv6AddIndex = 0; - uiIpv6AddIndex < uiIpv6AddrNoLongWords; - uiIpv6AddIndex++) { - if ((dest_addr->ulIpv6Mask[uiLoopIndex+uiIpv6AddIndex] & - aulDestIP[uiIpv6AddIndex]) != - dest_addr->ulIpv6Addr[uiLoopIndex+uiIpv6AddIndex]) { - /* - * Match failed for current Ipv6 Address. 
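
The matching loop above converts the packet address to host order and then requires, word by word, that (packet & mask) equals the rule address. A self-contained sketch of that per-word test for a single address/mask pair; the struct and values here are illustrative, not the driver's types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* An IPv6 address held as four 32-bit host-order words, the shape the
     * classifier works with after its ntohl() conversion. */
    struct ipv6_words {
        uint32_t w[4];
    };

    /* True when every word satisfies (pkt & mask) == addr, i.e. the packet
     * address falls inside the rule's address/mask range. */
    static bool ipv6_masked_match(const struct ipv6_words *pkt,
                                  const struct ipv6_words *addr,
                                  const struct ipv6_words *mask)
    {
        int i;

        for (i = 0; i < 4; i++)
            if ((pkt->w[i] & mask->w[i]) != addr->w[i])
                return false;
        return true;
    }

    int main(void)
    {
        struct ipv6_words pkt  = { { 0x20010db8, 0x00000001, 0x0, 0x42 } };
        struct ipv6_words addr = { { 0x20010db8, 0x00000001, 0x0, 0x0 } };
        struct ipv6_words mask = { { 0xffffffff, 0xffffffff, 0x0, 0x0 } };

        printf("match: %d\n", ipv6_masked_match(&pkt, &addr, &mask)); /* 1 */
        return 0;
    }
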
- * Try next Ipv6 Address - */ - break; - } - - if (uiIpv6AddIndex == uiIpv6AddrNoLongWords-1) { - /* Match Found */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, - DBG_LVL_ALL, - "Ipv6 Destination Ip Address Matched\n"); - return TRUE; - } - } - } - return false; - -} - -VOID DumpIpv6Address(ULONG *puIpv6Address) -{ - UINT uiIpv6AddrNoLongWords = 4; - UINT uiIpv6AddIndex = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - for (uiIpv6AddIndex = 0; - uiIpv6AddIndex < uiIpv6AddrNoLongWords; - uiIpv6AddIndex++) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - ":%lx", puIpv6Address[uiIpv6AddIndex]); - } - -} - -static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header) -{ - UCHAR ucVersion; - UCHAR ucPrio; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "----Ipv6 Header---"); - ucVersion = pstIpv6Header->ucVersionPrio & 0xf0; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "Version : %x\n", ucVersion); - ucPrio = pstIpv6Header->ucVersionPrio & 0x0f; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "Priority : %x\n", ucPrio); - /* - * BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - * "Flow Label : %x\n",(pstIpv6Header->ucVersionPrio &0xf0); - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "Payload Length : %x\n", - ntohs(pstIpv6Header->usPayloadLength)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "Next Header : %x\n", pstIpv6Header->ucNextHeader); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "Hop Limit : %x\n", pstIpv6Header->ucHopLimit); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "Src Address :\n"); - DumpIpv6Address(pstIpv6Header->ulSrcIpAddress); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "Dest Address :\n"); - DumpIpv6Address(pstIpv6Header->ulDestIpAddress); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL, - "----Ipv6 Header End---"); - - -} diff --git a/drivers/staging/bcm/IPv6ProtocolHdr.h b/drivers/staging/bcm/IPv6ProtocolHdr.h deleted file mode 100644 index 96b36a579af2..000000000000 --- a/drivers/staging/bcm/IPv6ProtocolHdr.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef _IPV6_PROTOCOL_DEFINES_ -#define _IPV6_PROTOCOL_DEFINES_ - -#define IPV6HDR_TYPE_HOPBYHOP 0x0 -#define IPV6HDR_TYPE_ROUTING 0x2B -#define IPV6HDR_TYPE_FRAGMENTATION 0x2C -#define IPV6HDR_TYPE_DESTOPTS 0x3c -#define IPV6HDR_TYPE_AUTHENTICATION 0x33 -#define IPV6HDR_TYPE_ENCRYPTEDSECURITYPAYLOAD 0x34 -#define MASK_IPV6_CS_SPEC 0x2 - -#define TCP_HEADER_TYPE 0x6 -#define UDP_HEADER_TYPE 0x11 -#define IPV6_ICMP_HDR_TYPE 0x2 -#define IPV6_FLOWLABEL_BITOFFSET 9 - -#define IPV6_MAX_CHAINEDHDR_BUFFBYTES 0x64 -/* - * Size of Dest Options field of Destinations Options Header - * in bytes. 
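
One detail worth noting about DumpIpv6Header above: per RFC 8200 the first octet of an IPv6 header holds the version in its upper nibble and the top half of the Traffic Class in its lower nibble, so masking with 0xf0 alone prints 0x60 rather than 6. A hedged standalone sketch of the extraction, not part of this patch:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * First octet of an IPv6 header (RFC 8200): the upper nibble is the
     * version, the lower nibble is the upper four bits of Traffic Class.
     */
    static void dump_first_octet(uint8_t version_prio)
    {
        uint8_t version   = version_prio >> 4;      /* 6 for IPv6 */
        uint8_t tclass_hi = version_prio & 0x0f;

        printf("Version: %u, Traffic Class (upper bits): 0x%x\n",
               version, tclass_hi);
    }

    int main(void)
    {
        dump_first_octet(0x60);     /* plain IPv6, traffic class 0 */
        return 0;
    }
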
- */ -#define IPV6_DESTOPTS_HDR_OPTIONSIZE 0x8 - -struct bcm_ipv6_hdr { - unsigned char ucVersionPrio; - unsigned char aucFlowLabel[3]; - unsigned short usPayloadLength; - unsigned char ucNextHeader; - unsigned char ucHopLimit; - unsigned long ulSrcIpAddress[4]; - unsigned long ulDestIpAddress[4]; -}; - -struct bcm_ipv6_routing_hdr { - unsigned char ucNextHeader; - unsigned char ucRoutingType; - unsigned char ucNumAddresses; - unsigned char ucNextAddress; - unsigned long ulReserved; -}; - -struct bcm_ipv6_fragment_hdr { - unsigned char ucNextHeader; - unsigned char ucReserved; - unsigned short usFragmentOffset; - unsigned long ulIdentification; -}; - -struct bcm_ipv6_dest_options_hdr { - unsigned char ucNextHeader; - unsigned char ucHdrExtLen; - unsigned char ucDestOptions[6]; -}; - -struct bcm_ipv6_options_hdr { - unsigned char ucNextHeader; - unsigned char ucMisc[3]; - unsigned long ulJumboPayloadLen; -}; - -struct bcm_ipv6_authentication_hdr { - unsigned char ucNextHeader; - unsigned char ucLength; - unsigned short usReserved; - unsigned long ulSecurityParametersIndex; -}; - -enum bcm_ipaddr_context { - eSrcIpAddress, - eDestIpAddress -}; - -/* Function Prototypes */ - -unsigned short IpVersion6(struct bcm_mini_adapter *Adapter, /* < Pointer to the driver control structure */ - void *pcIpHeader, /* psAdapter; */ - char *buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL); - - if (!buff) - return -ENOMEM; - - while (1) { - oldfs = get_fs(); - set_fs(get_ds()); - len = vfs_read(flp, (void __force __user *)buff, - MAX_TRANSFER_CTRL_BYTE_USB, &pos); - set_fs(oldfs); - if (len <= 0) { - if (len < 0) - errno = len; - else - errno = 0; - break; - } - /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT, - * DBG_LVL_ALL, buff, - * MAX_TRANSFER_CTRL_BYTE_USB); - */ - errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len); - if (errno) - break; - on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB; - } - - kfree(buff); - return errno; -} - -int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, - unsigned int on_chip_loc) -{ - char *buff, *buff_readback; - unsigned int reg = 0; - mm_segment_t oldfs = {0}; - int errno = 0, len = 0, is_config_file = 0; - loff_t pos = 0; - static int fw_down; - INT Status = STATUS_SUCCESS; - struct bcm_interface_adapter *psIntfAdapter = arg; - int bytes; - - buff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA); - buff_readback = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA); - if (!buff || !buff_readback) { - kfree(buff); - kfree(buff_readback); - - return -ENOMEM; - } - - is_config_file = (on_chip_loc == CONFIG_BEGIN_ADDR) ? 
1 : 0; - - while (1) { - oldfs = get_fs(); - set_fs(get_ds()); - len = vfs_read(flp, (void __force __user *)buff, - MAX_TRANSFER_CTRL_BYTE_USB, &pos); - set_fs(oldfs); - fw_down++; - - if (len <= 0) { - if (len < 0) - errno = len; - else - errno = 0; - break; - } - - bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, - buff_readback, len); - if (bytes < 0) { - Status = bytes; - goto exit; - } - reg++; - if ((len-sizeof(unsigned int)) < 4) { - if (memcmp(buff_readback, buff, len)) { - Status = -EIO; - goto exit; - } - } else { - len -= 4; - - while (len) { - if (*(unsigned int *)&buff_readback[len] != - *(unsigned int *)&buff[len]) { - Status = -EIO; - goto exit; - } - len -= 4; - } - } - on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB; - } /* End of while(1) */ - -exit: - kfree(buff); - kfree(buff_readback); - return Status; -} - -static int bcm_download_config_file(struct bcm_mini_adapter *Adapter, - struct bcm_firmware_info *psFwInfo) -{ - int retval = STATUS_SUCCESS; - B_UINT32 value = 0; - - if (Adapter->pstargetparams == NULL) { - Adapter->pstargetparams = - kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL); - if (Adapter->pstargetparams == NULL) - return -ENOMEM; - } - - if (psFwInfo->u32FirmwareLength != sizeof(struct bcm_target_params)) - return -EIO; - - retval = copy_from_user(Adapter->pstargetparams, - psFwInfo->pvMappedFirmwareAddress, - psFwInfo->u32FirmwareLength); - if (retval) { - kfree(Adapter->pstargetparams); - Adapter->pstargetparams = NULL; - return -EFAULT; - } - - /* Parse the structure and then Download the Firmware */ - beceem_parse_target_struct(Adapter); - - /* Initializing the NVM. */ - BcmInitNVM(Adapter); - retval = InitLedSettings(Adapter); - - if (retval) - return retval; - - if (Adapter->LEDInfo.led_thread_running & - BCM_LED_THREAD_RUNNING_ACTIVELY) { - Adapter->LEDInfo.bLedInitDone = false; - Adapter->DriverState = DRIVER_INIT; - wake_up(&Adapter->LEDInfo.notify_led_event); - } - - if (Adapter->LEDInfo.led_thread_running & - BCM_LED_THREAD_RUNNING_ACTIVELY) { - Adapter->DriverState = FW_DOWNLOAD; - wake_up(&Adapter->LEDInfo.notify_led_event); - } - - /* Initialize the DDR Controller */ - retval = ddr_init(Adapter); - if (retval) - return retval; - - value = 0; - wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, - &value, sizeof(value)); - wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, - &value, sizeof(value)); - - if (Adapter->eNVMType == NVM_FLASH) { - retval = PropagateCalParamsFromFlashToMemory(Adapter); - if (retval) - return retval; - } - - retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams, - sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR); - - if (retval) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, - MP_INIT, DBG_LVL_ALL, - "configuration file not downloaded properly"); - else - Adapter->bCfgDownloaded = TRUE; - - return retval; -} - -int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, - struct bcm_firmware_info *psFwInfo) -{ - int retval = STATUS_SUCCESS; - PUCHAR buff = NULL; - - /* Config File is needed for the Driver to download the Config file and - * Firmware. Check for the Config file to be first to be sent from the - * Application - */ - atomic_set(&Adapter->uiMBupdate, false); - if (!Adapter->bCfgDownloaded && - psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) { - /* Can't Download Firmware. 
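
The firmware download and readback helpers above share one pattern: transfer in fixed-size chunks and compare what comes back against what was sent. A minimal, self-contained sketch of that verify loop, using a toy memory-backed device in place of the USB register interface; all names and sizes here are illustrative:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define CHUNK_SIZE   64     /* illustrative; the driver uses its USB transfer limit */
    #define DEV_MEM_SIZE 4096

    /* Toy memory-backed "device" standing in for the USB register interface. */
    static unsigned char dev_mem[DEV_MEM_SIZE];

    static int dev_write(unsigned long addr, const unsigned char *buf, size_t len)
    {
        if (addr + len > DEV_MEM_SIZE)
            return -1;
        memcpy(dev_mem + addr, buf, len);
        return 0;
    }

    static int dev_read(unsigned long addr, unsigned char *buf, size_t len)
    {
        if (addr + len > DEV_MEM_SIZE)
            return -1;
        memcpy(buf, dev_mem + addr, len);
        return 0;
    }

    /*
     * Write 'image' in chunks, read each chunk back and compare it against
     * what was sent; returns 0 on success, -1 on an I/O error or mismatch.
     */
    static int download_and_verify(unsigned long addr,
                                   const unsigned char *image, size_t len)
    {
        unsigned char readback[CHUNK_SIZE];

        while (len) {
            size_t n = len < CHUNK_SIZE ? len : CHUNK_SIZE;

            if (dev_write(addr, image, n) < 0 ||
                dev_read(addr, readback, n) < 0 ||
                memcmp(readback, image, n) != 0)
                return -1;

            addr += n;
            image += n;
            len -= n;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char image[300];

        memset(image, 0xAB, sizeof(image));
        printf("verify: %d\n", download_and_verify(0x100, image, sizeof(image)));
        return 0;
    }
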
*/ - return -EINVAL; - } - - /* If Config File, Finish the DDR Settings and then Download CFG File */ - if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR) { - retval = bcm_download_config_file(Adapter, psFwInfo); - } else { - buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL); - if (buff == NULL) - return -ENOMEM; - - retval = copy_from_user(buff, - psFwInfo->pvMappedFirmwareAddress, - psFwInfo->u32FirmwareLength); - if (retval != STATUS_SUCCESS) { - retval = -EFAULT; - goto error; - } - - retval = buffDnldVerify(Adapter, - buff, - psFwInfo->u32FirmwareLength, - psFwInfo->u32StartingAddress); - - if (retval != STATUS_SUCCESS) - goto error; - } - -error: - kfree(buff); - return retval; -} - -static INT buffDnld(struct bcm_mini_adapter *Adapter, - PUCHAR mappedbuffer, UINT u32FirmwareLength, - ULONG u32StartingAddress) -{ - unsigned int len = 0; - int retval = STATUS_SUCCESS; - - len = u32FirmwareLength; - - while (u32FirmwareLength) { - len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB); - retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len); - - if (retval) - break; - u32StartingAddress += len; - u32FirmwareLength -= len; - mappedbuffer += len; - } - return retval; -} - -static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter, - PUCHAR mappedbuffer, UINT u32FirmwareLength, - ULONG u32StartingAddress) -{ - UINT len = u32FirmwareLength; - INT retval = STATUS_SUCCESS; - PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL); - int bytes; - - if (NULL == readbackbuff) - return -ENOMEM; - - while (u32FirmwareLength && !retval) { - len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB); - bytes = rdm(Adapter, u32StartingAddress, readbackbuff, len); - - if (bytes < 0) { - retval = bytes; - break; - } - - if (memcmp(readbackbuff, mappedbuffer, len) != 0) { - pr_err("%s() failed. The firmware doesn't match what was written", - __func__); - retval = -EIO; - } - - u32StartingAddress += len; - u32FirmwareLength -= len; - mappedbuffer += len; - - } /* end of while (u32FirmwareLength && !retval) */ - kfree(readbackbuff); - return retval; -} - -INT buffDnldVerify(struct bcm_mini_adapter *Adapter, - unsigned char *mappedbuffer, - unsigned int u32FirmwareLength, - unsigned long u32StartingAddress) -{ - INT status = STATUS_SUCCESS; - - status = buffDnld(Adapter, mappedbuffer, - u32FirmwareLength, u32StartingAddress); - if (status != STATUS_SUCCESS) - goto error; - - status = buffRdbkVerify(Adapter, mappedbuffer, - u32FirmwareLength, u32StartingAddress); - if (status != STATUS_SUCCESS) - goto error; -error: - return status; -} diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c deleted file mode 100644 index 612c89fba341..000000000000 --- a/drivers/staging/bcm/InterfaceIdleMode.c +++ /dev/null @@ -1,274 +0,0 @@ -#include "headers.h" - -/* -Function: InterfaceIdleModeWakeup - -Description: This is the hardware specific Function for - waking up HW device from Idle mode. - A software abort pattern is written to the - device to wake it and necessary power state - transitions from host are performed here. - -Input parameters: IN struct bcm_mini_adapter *Adapter - - Miniport Adapter Context - -Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface - was successful. - Other - If an error occurred. -*/ - -/* -Function: InterfaceIdleModeRespond - -Description: This is the hardware specific Function for - responding to Idle mode request from target. 
- Necessary power state transitions from host for - idle mode or other device specific initializations - are performed here. - -Input parameters: IN struct bcm_mini_adapter * Adapter - - Miniport Adapter Context - -Return: BCM_STATUS_SUCCESS - If Idle mode response related - HW configuration was successful. - Other - If an error occurred. -*/ - -/* -"dmem bfc02f00 100" tells how many time device went in Idle mode. -this value will be at address bfc02fa4.just before value d0ea1dle. - -Set time value by writing at bfc02f98 7d0 - -checking the Ack timer expire on kannon by running command -d qcslog .. if it shows e means host has not send response -to f/w with in 200 ms. Response should be -send to f/w with in 200 ms after the Idle/Shutdown req issued - -*/ - - -int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, - unsigned int *puiBuffer) -{ - int status = STATUS_SUCCESS; - unsigned int uiRegRead = 0; - int bytes; - - if (ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD) { - if (ntohl(*(puiBuffer+1)) == 0) { - - status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, - &uiRegRead, sizeof(uiRegRead)); - if (status) - return status; - - if (Adapter->ulPowerSaveMode == - DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { - uiRegRead = 0x00000000; - status = wrmalt(Adapter, - DEBUG_INTERRUPT_GENERATOR_REGISTOR, - &uiRegRead, sizeof(uiRegRead)); - if (status) - return status; - } - /* Below Register should not br read in case of - * Manual and Protocol Idle mode */ - else if (Adapter->ulPowerSaveMode != - DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) { - /* clear on read Register */ - bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, - &uiRegRead, sizeof(uiRegRead)); - if (bytes < 0) { - status = bytes; - return status; - } - /* clear on read Register */ - bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, - &uiRegRead, sizeof(uiRegRead)); - if (bytes < 0) { - status = bytes; - return status; - } - } - - /* Set Idle Mode Flag to False and - * Clear IdleMode reg. 
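
The idle-mode response logic above keys off two 32-bit network-order words from the firmware: the first identifies the message, the second distinguishes a completed wakeup (zero) from a request to enter idle mode (non-zero). A user-space sketch of that dispatch; the payload constant is a placeholder, not the driver's real value:

    #include <arpa/inet.h>  /* htonl(), ntohl() */
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder value; the real payload constant was defined in the
     * driver headers removed by this patch. */
    #define GO_TO_IDLE_MODE_PAYLOAD 0x000000E2u

    /*
     * The firmware message is two 32-bit big-endian words: the first names
     * the message type, the second is zero when the device has finished
     * waking up and non-zero when it is asking to enter idle mode.
     */
    static void handle_idle_msg(const uint32_t *msg)
    {
        if (ntohl(msg[0]) != GO_TO_IDLE_MODE_PAYLOAD)
            return;

        if (ntohl(msg[1]) == 0)
            printf("wakeup from idle mode completed\n");
        else
            printf("device requests to enter idle mode\n");
    }

    int main(void)
    {
        uint32_t wake[2] = { htonl(GO_TO_IDLE_MODE_PAYLOAD), htonl(0) };
        uint32_t req[2]  = { htonl(GO_TO_IDLE_MODE_PAYLOAD), htonl(1) };

        handle_idle_msg(wake);
        handle_idle_msg(req);
        return 0;
    }
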
*/ - Adapter->IdleMode = false; - Adapter->bTriedToWakeUpFromlowPowerMode = false; - - wake_up(&Adapter->lowpower_mode_wait_queue); - - } else { - if (TRUE == Adapter->IdleMode) - return status; - - uiRegRead = 0; - - if (Adapter->chip_id == BCS220_2 || - Adapter->chip_id == BCS220_2BC || - Adapter->chip_id == BCS250_BC || - Adapter->chip_id == BCS220_3) { - - bytes = rdmalt(Adapter, HPM_CONFIG_MSW, - &uiRegRead, sizeof(uiRegRead)); - if (bytes < 0) { - status = bytes; - return status; - } - - - uiRegRead |= (1<<17); - - status = wrmalt(Adapter, HPM_CONFIG_MSW, - &uiRegRead, sizeof(uiRegRead)); - if (status) - return status; - } - SendIdleModeResponse(Adapter); - } - } else if (ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG) { - OverrideServiceFlowParams(Adapter, puiBuffer); - } - return status; -} - -static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter, - unsigned int Pattern) -{ - int status = STATUS_SUCCESS; - unsigned int value; - unsigned int chip_id; - unsigned long timeout = 0, itr = 0; - - int lenwritten = 0; - unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF}; - struct bcm_interface_adapter *psInterfaceAdapter = - Adapter->pvInterfaceAdapter; - - /* Abort Bus suspend if its already suspended */ - if ((TRUE == psInterfaceAdapter->bSuspended) && - (TRUE == Adapter->bDoSuspend)) - status = usb_autopm_get_interface( - psInterfaceAdapter->interface); - - if ((Adapter->ulPowerSaveMode == - DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) || - (Adapter->ulPowerSaveMode == - DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) { - /* write the SW abort pattern. */ - status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, - &Pattern, sizeof(Pattern)); - if (status) - return status; - } - - if (Adapter->ulPowerSaveMode == - DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { - value = 0x80000000; - status = wrmalt(Adapter, - DEBUG_INTERRUPT_GENERATOR_REGISTOR, - &value, sizeof(value)); - if (status) - return status; - } else if (Adapter->ulPowerSaveMode != - DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) { - /* - * Get a Interrupt Out URB and send 8 Bytes Down - * To be Done in Thread Context. - * Not using Asynchronous Mechanism. - */ - status = usb_interrupt_msg(psInterfaceAdapter->udev, - usb_sndintpipe(psInterfaceAdapter->udev, - psInterfaceAdapter->sIntrOut.int_out_endpointAddr), - aucAbortPattern, - 8, - &lenwritten, - 5000); - if (status) - return status; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - IDLE_MODE, DBG_LVL_ALL, - "NOB Sent down :%d", lenwritten); - - /* mdelay(25); */ - - timeout = jiffies + msecs_to_jiffies(50); - while (time_after(timeout, jiffies)) { - itr++; - rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(UINT)); - if (0xbece3200 == (chip_id&~(0xF0))) - chip_id = chip_id&~(0xF0); - if (chip_id == Adapter->chip_id) - break; - } - if (time_before(timeout, jiffies)) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - IDLE_MODE, DBG_LVL_ALL, - "Not able to read chip-id even after 25 msec"); - else - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - IDLE_MODE, DBG_LVL_ALL, - "Number of completed iteration to read chip-id :%lu", itr); - - status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, - &Pattern, sizeof(status)); - if (status) - return status; - } - return status; -} -int InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter) -{ - if (Adapter->bTriedToWakeUpFromlowPowerMode) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - IDLE_MODE, DBG_LVL_ALL, - "Wake up already attempted.. 
ignoring\n"); - } else { - Adapter->bTriedToWakeUpFromlowPowerMode = TRUE; - InterfaceAbortIdlemode(Adapter, Adapter->usIdleModePattern); - - } - return 0; -} - -void InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter) -{ - unsigned int uiRegVal = 0; - INT Status = 0; - int bytes; - - if (Adapter->ulPowerSaveMode == - DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) { - /* clear idlemode interrupt. */ - uiRegVal = 0; - Status = wrmalt(Adapter, - DEBUG_INTERRUPT_GENERATOR_REGISTOR, - &uiRegVal, sizeof(uiRegVal)); - if (Status) - return; - } - - else { - -/* clear Interrupt EP registers. */ - bytes = rdmalt(Adapter, - DEVICE_INT_OUT_EP_REG0, - &uiRegVal, sizeof(uiRegVal)); - if (bytes < 0) { - Status = bytes; - return; - } - - bytes = rdmalt(Adapter, - DEVICE_INT_OUT_EP_REG1, - &uiRegVal, sizeof(uiRegVal)); - if (bytes < 0) { - Status = bytes; - return; - } - } -} - diff --git a/drivers/staging/bcm/InterfaceIdleMode.h b/drivers/staging/bcm/InterfaceIdleMode.h deleted file mode 100644 index 2ef64003aa89..000000000000 --- a/drivers/staging/bcm/InterfaceIdleMode.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _INTERFACE_IDLEMODE_H -#define _INTERFACE_IDLEMODE_H - -INT InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter); - -INT InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, - unsigned int *puiBuffer); - -VOID InterfaceWriteIdleModeWakePattern(struct bcm_mini_adapter *Adapter); - -INT InterfaceWakeUp(struct bcm_mini_adapter *Adapter); - -VOID InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter); -#endif - diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c deleted file mode 100644 index 9eaffd884a4c..000000000000 --- a/drivers/staging/bcm/InterfaceInit.c +++ /dev/null @@ -1,729 +0,0 @@ -#include "headers.h" -#include -static struct usb_device_id InterfaceUsbtable[] = { - { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) }, - { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) }, - { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) }, - { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SYM) }, - { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) }, - { USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) }, - { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_TU25) }, - { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_226) }, - { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_326) }, - { } -}; -MODULE_DEVICE_TABLE(usb, InterfaceUsbtable); - -static int debug = -1; -module_param(debug, int, 0600); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -static const u32 default_msg = - NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK - | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR - | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; - -static int InterfaceAdapterInit(struct bcm_interface_adapter *Adapter); - -static void InterfaceAdapterFree(struct bcm_interface_adapter *psIntfAdapter) -{ - int i = 0; - struct bcm_mini_adapter *ps_ad = psIntfAdapter->psAdapter; - - /* Wake up the wait_queue... */ - if (ps_ad->LEDInfo.led_thread_running & - BCM_LED_THREAD_RUNNING_ACTIVELY) { - ps_ad->DriverState = DRIVER_HALT; - wake_up(&ps_ad->LEDInfo.notify_led_event); - } - reset_card_proc(ps_ad); - - /* - * worst case time taken by the RDM/WRM will be 5 sec. will check after - * every 100 ms to accertain the device is not being accessed. After - * this No RDM/WRM should be made. 
- */ - while (ps_ad->DeviceAccess) { - BCM_DEBUG_PRINT(ps_ad, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, "Device is being accessed.\n"); - msleep(100); - } - /* Free interrupt URB */ - /* ps_ad->device_removed = TRUE; */ - usb_free_urb(psIntfAdapter->psInterruptUrb); - - /* Free transmit URBs */ - for (i = 0; i < MAXIMUM_USB_TCB; i++) { - if (psIntfAdapter->asUsbTcb[i].urb != NULL) { - usb_free_urb(psIntfAdapter->asUsbTcb[i].urb); - psIntfAdapter->asUsbTcb[i].urb = NULL; - } - } - /* Free receive URB and buffers */ - for (i = 0; i < MAXIMUM_USB_RCB; i++) { - if (psIntfAdapter->asUsbRcb[i].urb != NULL) { - kfree(psIntfAdapter->asUsbRcb[i].urb->transfer_buffer); - usb_free_urb(psIntfAdapter->asUsbRcb[i].urb); - psIntfAdapter->asUsbRcb[i].urb = NULL; - } - } - AdapterFree(ps_ad); -} - -static void ConfigureEndPointTypesThroughEEPROM( - struct bcm_mini_adapter *Adapter) -{ - u32 ulReg; - int bytes; - struct bcm_interface_adapter *interfaceAdapter; - - /* Program EP2 MAX_PKT_SIZE */ - ulReg = ntohl(EP2_MPS_REG); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x128, 4, TRUE); - ulReg = ntohl(EP2_MPS); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x12C, 4, TRUE); - - ulReg = ntohl(EP2_CFG_REG); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x132, 4, TRUE); - interfaceAdapter = - (struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter); - if (interfaceAdapter->bHighSpeedDevice) { - ulReg = ntohl(EP2_CFG_INT); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE); - } else { - /* USE BULK EP as TX in FS mode. */ - ulReg = ntohl(EP2_CFG_BULK); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE); - } - - /* Program EP4 MAX_PKT_SIZE. */ - ulReg = ntohl(EP4_MPS_REG); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x13C, 4, TRUE); - ulReg = ntohl(EP4_MPS); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE); - - /* Program TX EP as interrupt(Alternate Setting) */ - bytes = rdmalt(Adapter, 0x0F0110F8, &ulReg, sizeof(u32)); - if (bytes < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, "reading of Tx EP failed\n"); - return; - } - ulReg |= 0x6; - - ulReg = ntohl(ulReg); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1CC, 4, TRUE); - - ulReg = ntohl(EP4_CFG_REG); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C8, 4, TRUE); - /* Program ISOCHRONOUS EP size to zero. */ - ulReg = ntohl(ISO_MPS_REG); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D2, 4, TRUE); - ulReg = ntohl(ISO_MPS); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D6, 4, TRUE); - - /* - * Update EEPROM Version. - * Read 4 bytes from 508 and modify 511 and 510. - */ - ReadBeceemEEPROM(Adapter, 0x1FC, &ulReg); - ulReg &= 0x0101FFFF; - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1FC, 4, TRUE); - - /* - * Update length field if required. - * Also make the string NULL terminated. 
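
The EEPROM fix-up that follows clamps a one-byte length field packed into bits 16 to 23 of a 32-bit word, capping it at 0x30. The bit manipulation in isolation, as a hedged standalone example:

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp the byte held in bits 16..23 of 'word' to 'max', the same
     * operation the EEPROM fix-up below performs with 0x30. */
    static uint32_t clamp_byte16(uint32_t word, uint8_t max)
    {
        if (((word & 0x00FF0000u) >> 16) > max)
            word = (word & 0xFF00FFFFu) | ((uint32_t)max << 16);
        return word;
    }

    int main(void)
    {
        printf("0x%08X\n", clamp_byte16(0x00400000u, 0x30));  /* 0x00300000 */
        printf("0x%08X\n", clamp_byte16(0x00200000u, 0x30));  /* unchanged */
        return 0;
    }
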
- */ - - ReadBeceemEEPROM(Adapter, 0xA8, &ulReg); - if ((ulReg&0x00FF0000)>>16 > 0x30) { - ulReg = (ulReg&0xFF00FFFF)|(0x30<<16); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0xA8, 4, TRUE); - } - ReadBeceemEEPROM(Adapter, 0x148, &ulReg); - if ((ulReg&0x00FF0000)>>16 > 0x30) { - ulReg = (ulReg&0xFF00FFFF)|(0x30<<16); - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x148, 4, TRUE); - } - ulReg = 0; - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x122, 4, TRUE); - ulReg = 0; - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C2, 4, TRUE); -} - -static int usbbcm_device_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct usb_device *udev = interface_to_usbdev(intf); - int retval; - struct bcm_mini_adapter *psAdapter; - struct bcm_interface_adapter *psIntfAdapter; - struct net_device *ndev; - - /* Reserve one extra queue for the bit-bucket */ - ndev = alloc_etherdev_mq(sizeof(struct bcm_mini_adapter), - NO_OF_QUEUES + 1); - if (ndev == NULL) { - dev_err(&udev->dev, DRV_NAME ": no memory for device\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(ndev, &intf->dev); - - psAdapter = netdev_priv(ndev); - psAdapter->dev = ndev; - psAdapter->msg_enable = netif_msg_init(debug, default_msg); - - /* Init default driver debug state */ - - psAdapter->stDebugState.debug_level = DBG_LVL_CURR; - psAdapter->stDebugState.type = DBG_TYPE_INITEXIT; - - /* - * Technically, one can start using BCM_DEBUG_PRINT after this point. - * However, realize that by default the Type/Subtype bitmaps are all - * zero now; so no prints will actually appear until the TestApp turns - * on debug paths via the ioctl(); so practically speaking, in early - * init, no logging happens. - * - * A solution (used below): we explicitly set the bitmaps to 1 for - * Type=DBG_TYPE_INITEXIT and ALL subtype's of the same. Now all bcm - * debug statements get logged, enabling debug during early init. - * Further, we turn this OFF once init_module() completes. - */ - - psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xff; - BCM_SHOW_DEBUG_BITMAP(psAdapter); - - retval = InitAdapter(psAdapter); - if (retval) { - dev_err(&udev->dev, DRV_NAME ": InitAdapter Failed\n"); - AdapterFree(psAdapter); - return retval; - } - - /* Allocate interface adapter structure */ - psIntfAdapter = kzalloc(sizeof(struct bcm_interface_adapter), - GFP_KERNEL); - if (psIntfAdapter == NULL) { - AdapterFree(psAdapter); - return -ENOMEM; - } - - psAdapter->pvInterfaceAdapter = psIntfAdapter; - psIntfAdapter->psAdapter = psAdapter; - - /* Store usb interface in Interface Adapter */ - psIntfAdapter->interface = intf; - usb_set_intfdata(intf, psIntfAdapter); - - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, - "psIntfAdapter 0x%p\n", psIntfAdapter); - retval = InterfaceAdapterInit(psIntfAdapter); - if (retval) { - /* If the Firmware/Cfg File is not present - * then return success, let the application - * download the files. - */ - if (-ENOENT == retval) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, - "File Not Found. 
Use app to download.\n"); - return STATUS_SUCCESS; - } - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, "InterfaceAdapterInit failed.\n"); - usb_set_intfdata(intf, NULL); - udev = interface_to_usbdev(intf); - usb_put_dev(udev); - InterfaceAdapterFree(psIntfAdapter); - return retval; - } - if (psAdapter->chip_id > T3) { - uint32_t uiNackZeroLengthInt = 4; - - retval = - wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT, - &uiNackZeroLengthInt, - sizeof(uiNackZeroLengthInt)); - if (retval) - return retval; - } - - /* Check whether the USB-Device Supports remote Wake-Up */ - if (USB_CONFIG_ATT_WAKEUP & udev->actconfig->desc.bmAttributes) { - /* If Suspend then only support dynamic suspend */ - if (psAdapter->bDoSuspend) { -#ifdef CONFIG_PM - pm_runtime_set_autosuspend_delay(&udev->dev, 0); - intf->needs_remote_wakeup = 1; - usb_enable_autosuspend(udev); - device_init_wakeup(&intf->dev, 1); - INIT_WORK(&psIntfAdapter->usbSuspendWork, - putUsbSuspend); - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, - "Enabling USB Auto-Suspend\n"); -#endif - } else { - intf->needs_remote_wakeup = 0; - usb_disable_autosuspend(udev); - } - } - - psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0x0; - return retval; -} - -static void usbbcm_disconnect(struct usb_interface *intf) -{ - struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf); - struct bcm_mini_adapter *psAdapter; - struct usb_device *udev = interface_to_usbdev(intf); - - if (psIntfAdapter == NULL) - return; - - psAdapter = psIntfAdapter->psAdapter; - netif_device_detach(psAdapter->dev); - - if (psAdapter->bDoSuspend) - intf->needs_remote_wakeup = 0; - - psAdapter->device_removed = TRUE; - usb_set_intfdata(intf, NULL); - InterfaceAdapterFree(psIntfAdapter); - usb_put_dev(udev); -} - -static int AllocUsbCb(struct bcm_interface_adapter *psIntfAdapter) -{ - int i = 0; - - for (i = 0; i < MAXIMUM_USB_TCB; i++) { - psIntfAdapter->asUsbTcb[i].urb = usb_alloc_urb(0, GFP_KERNEL); - - if (psIntfAdapter->asUsbTcb[i].urb == NULL) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_PRINTK, 0, 0, - "Can't allocate Tx urb for index %d\n", - i); - return -ENOMEM; - } - } - - for (i = 0; i < MAXIMUM_USB_RCB; i++) { - psIntfAdapter->asUsbRcb[i].urb = usb_alloc_urb(0, GFP_KERNEL); - - if (psIntfAdapter->asUsbRcb[i].urb == NULL) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_PRINTK, 0, 0, - "Can't allocate Rx urb for index %d\n", - i); - return -ENOMEM; - } - - psIntfAdapter->asUsbRcb[i].urb->transfer_buffer = - kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL); - - if (psIntfAdapter->asUsbRcb[i].urb->transfer_buffer == NULL) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_PRINTK, 0, 0, - "Can't allocate Rx buffer for index %d\n", - i); - return -ENOMEM; - } - psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length = - MAX_DATA_BUFFER_SIZE; - } - return 0; -} - -static int device_run(struct bcm_interface_adapter *psIntfAdapter) -{ - int value = 0; - UINT status = STATUS_SUCCESS; - struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter; - - status = InitCardAndDownloadFirmware(psAd); - if (status != STATUS_SUCCESS) { - pr_err(DRV_NAME "InitCardAndDownloadFirmware failed.\n"); - return status; - } - if (psAd->fw_download_done) { - if (StartInterruptUrb(psIntfAdapter)) { - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, - "Cannot send interrupt in URB\n"); - } - - /* - * now register the cntrl interface. 
after downloading the f/w - * waiting for 5 sec to get the mailbox interrupt. - */ - psAd->waiting_to_fw_download_done = false; - value = wait_event_timeout(psAd->ioctl_fw_dnld_wait_queue, - psAd->waiting_to_fw_download_done, - 5 * HZ); - - if (value == 0) - pr_err(DRV_NAME ": Timeout waiting for mailbox interrupt.\n"); - - if (register_control_device_interface(psAd) < 0) { - pr_err(DRV_NAME ": Register Control Device failed.\n"); - return -EIO; - } - } - return 0; -} - -static int select_alternate_setting_for_highspeed_modem( - struct bcm_interface_adapter *psIntfAdapter, - struct usb_endpoint_descriptor **endpoint, - const struct usb_host_interface *iface_desc, - int *usedIntOutForBulkTransfer) -{ - int retval = 0; - struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter; - - /* selecting alternate setting one as a default setting - * for High Speed modem. */ - if (psIntfAdapter->bHighSpeedDevice) - retval = usb_set_interface(psIntfAdapter->udev, - DEFAULT_SETTING_0, - ALTERNATE_SETTING_1); - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, - "BCM16 is applicable on this dongle\n"); - if (retval || !psIntfAdapter->bHighSpeedDevice) { - *usedIntOutForBulkTransfer = EP2; - *endpoint = &iface_desc->endpoint[EP2].desc; - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, - "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n"); - /* - * If Modem is high speed device EP2 should be - * INT OUT End point - * - * If Mode is FS then EP2 should be bulk end - * point - */ - if ((psIntfAdapter->bHighSpeedDevice && - !usb_endpoint_is_int_out(*endpoint)) || - (!psIntfAdapter->bHighSpeedDevice && - !usb_endpoint_is_bulk_out(*endpoint))) { - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, - "Configuring the EEPROM\n"); - /* change the EP2, EP4 to INT OUT end point */ - ConfigureEndPointTypesThroughEEPROM( - psAd); - - /* - * It resets the device and if any thing - * gets changed in USB descriptor it - * will show fail and re-enumerate the - * device - */ - retval = usb_reset_device(psIntfAdapter->udev); - if (retval) { - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, - DRV_ENTRY, DBG_LVL_ALL, - "reset failed. Re-enumerating the device.\n"); - return retval; - } - - } - if (!psIntfAdapter->bHighSpeedDevice && - usb_endpoint_is_bulk_out(*endpoint)) { - /* - * Once BULK is selected in FS mode. - * Revert it back to INT. - * Else USB_IF will fail. - */ - UINT _uiData = ntohl(EP2_CFG_INT); - - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, - "Reverting Bulk to INT as it is in Full Speed mode.\n"); - BeceemEEPROMBulkWrite(psAd, (PUCHAR) & _uiData, 0x136, - 4, TRUE); - } - } else { - *usedIntOutForBulkTransfer = EP4; - *endpoint = &iface_desc->endpoint[EP4].desc; - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, - "Choosing AltSetting as a default setting.\n"); - if (!usb_endpoint_is_int_out(*endpoint)) { - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, - "Dongle does not have BCM16 Fix.\n"); - /* - * change the EP2, EP4 to INT OUT end point and use EP4 - * in altsetting - */ - ConfigureEndPointTypesThroughEEPROM(psAd); - - /* - * It resets the device and if any thing - * gets changed in USB descriptor it - * will show fail and re-enumerate the - * device - */ - retval = usb_reset_device(psIntfAdapter->udev); - if (retval) { - BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, - DRV_ENTRY, DBG_LVL_ALL, - "reset failed. 
Re-enumerating the device.\n"); - return retval; - } - } - } - - return 0; -} - -static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter) -{ - struct usb_host_interface *iface_desc; - struct usb_endpoint_descriptor *endpoint; - size_t buffer_size; - unsigned long value; - int retval = 0; - int usedIntOutForBulkTransfer = 0; - bool bBcm16 = false; - UINT uiData = 0; - int bytes; - struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter; - - /* Store the usb dev into interface adapter */ - psIntfAdapter->udev = - usb_get_dev(interface_to_usbdev(psIntfAdapter->interface)); - - psIntfAdapter->bHighSpeedDevice = - (psIntfAdapter->udev->speed == USB_SPEED_HIGH); - psAd->interface_rdm = BcmRDM; - psAd->interface_wrm = BcmWRM; - - bytes = rdmalt(psAd, CHIP_ID_REG, (u32 *) &(psAd->chip_id), - sizeof(u32)); - if (bytes < 0) { - retval = bytes; - BCM_DEBUG_PRINT(psAd, DBG_TYPE_PRINTK, 0, 0, - "CHIP ID Read Failed\n"); - return retval; - } - - if (0xbece3200 == (psAd->chip_id & ~(0xF0))) - psAd->chip_id &= ~0xF0; - - dev_info(&psIntfAdapter->udev->dev, "RDM Chip ID 0x%lx\n", - psAd->chip_id); - - iface_desc = psIntfAdapter->interface->cur_altsetting; - - if (psAd->chip_id == T3B) { - /* T3B device will have EEPROM, check if EEPROM is proper and - * BCM16 can be done or not. */ - BeceemEEPROMBulkRead(psAd, &uiData, 0x0, 4); - if (uiData == BECM) - bBcm16 = TRUE; - - dev_info(&psIntfAdapter->udev->dev, - "number of alternate setting %d\n", - psIntfAdapter->interface->num_altsetting); - - if (bBcm16 == TRUE) { - retval = select_alternate_setting_for_highspeed_modem( - psIntfAdapter, &endpoint, iface_desc, - &usedIntOutForBulkTransfer); - if (retval) - return retval; - } - } - - iface_desc = psIntfAdapter->interface->cur_altsetting; - - for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value) { - endpoint = &iface_desc->endpoint[value].desc; - - if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr && - usb_endpoint_is_bulk_in(endpoint)) { - buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); - psIntfAdapter->sBulkIn.bulk_in_size = buffer_size; - psIntfAdapter->sBulkIn.bulk_in_endpointAddr = - endpoint->bEndpointAddress; - psIntfAdapter->sBulkIn.bulk_in_pipe = usb_rcvbulkpipe( - psIntfAdapter->udev, - psIntfAdapter->sBulkIn.bulk_in_endpointAddr); - } - - if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr && - usb_endpoint_is_bulk_out(endpoint)) { - psIntfAdapter->sBulkOut.bulk_out_endpointAddr = - endpoint->bEndpointAddress; - psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndbulkpipe( - psIntfAdapter->udev, - psIntfAdapter->sBulkOut.bulk_out_endpointAddr); - } - - if (!psIntfAdapter->sIntrIn.int_in_endpointAddr && - usb_endpoint_is_int_in(endpoint)) { - buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); - psIntfAdapter->sIntrIn.int_in_size = buffer_size; - psIntfAdapter->sIntrIn.int_in_endpointAddr = - endpoint->bEndpointAddress; - psIntfAdapter->sIntrIn.int_in_interval = - endpoint->bInterval; - psIntfAdapter->sIntrIn.int_in_buffer = - kmalloc(buffer_size, GFP_KERNEL); - if (!psIntfAdapter->sIntrIn.int_in_buffer) - return -EINVAL; - } - - if (!psIntfAdapter->sIntrOut.int_out_endpointAddr && - usb_endpoint_is_int_out(endpoint)) { - if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr && - (psAd->chip_id == T3B) && - (value == usedIntOutForBulkTransfer)) { - /* - * use first intout end point as a bulk out end - * point - */ - buffer_size = - le16_to_cpu(endpoint->wMaxPacketSize); - psIntfAdapter->sBulkOut.bulk_out_size = - buffer_size; - psIntfAdapter->sBulkOut.bulk_out_endpointAddr 
= - endpoint->bEndpointAddress; - psIntfAdapter->sBulkOut.bulk_out_pipe = - usb_sndintpipe(psIntfAdapter->udev, - psIntfAdapter->sBulkOut - .bulk_out_endpointAddr); - psIntfAdapter->sBulkOut.int_out_interval = - endpoint->bInterval; - } else if (value == EP6) { - buffer_size = - le16_to_cpu(endpoint->wMaxPacketSize); - psIntfAdapter->sIntrOut.int_out_size = - buffer_size; - psIntfAdapter->sIntrOut.int_out_endpointAddr = - endpoint->bEndpointAddress; - psIntfAdapter->sIntrOut.int_out_interval = - endpoint->bInterval; - psIntfAdapter->sIntrOut.int_out_buffer = - kmalloc(buffer_size, GFP_KERNEL); - if (!psIntfAdapter->sIntrOut.int_out_buffer) - return -EINVAL; - } - } - } - - usb_set_intfdata(psIntfAdapter->interface, psIntfAdapter); - - psAd->bcm_file_download = InterfaceFileDownload; - psAd->bcm_file_readback_from_chip = InterfaceFileReadbackFromChip; - psAd->interface_transmit = InterfaceTransmitPacket; - - retval = CreateInterruptUrb(psIntfAdapter); - - if (retval) { - BCM_DEBUG_PRINT(psAd, DBG_TYPE_PRINTK, 0, 0, - "Cannot create interrupt urb\n"); - return retval; - } - - retval = AllocUsbCb(psIntfAdapter); - if (retval) - return retval; - - return device_run(psIntfAdapter); -} - -static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message) -{ - struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf); - - psIntfAdapter->bSuspended = TRUE; - - if (psIntfAdapter->bPreparingForBusSuspend) { - psIntfAdapter->bPreparingForBusSuspend = false; - - if (psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE) { - psIntfAdapter->psAdapter->IdleMode = TRUE; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, - "Host Entered in PMU Idle Mode.\n"); - } else { - psIntfAdapter->psAdapter->bShutStatus = TRUE; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_INITEXIT, DRV_ENTRY, - DBG_LVL_ALL, - "Host Entered in PMU Shutdown Mode.\n"); - } - } - psIntfAdapter->psAdapter->bPreparingForLowPowerMode = false; - - /* Signaling the control pkt path */ - wake_up(&psIntfAdapter->psAdapter->lowpower_mode_wait_queue); - - return 0; -} - -static int InterfaceResume(struct usb_interface *intf) -{ - struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf); - - mdelay(100); - psIntfAdapter->bSuspended = false; - - StartInterruptUrb(psIntfAdapter); - InterfaceRx(psIntfAdapter); - return 0; -} - -static struct usb_driver usbbcm_driver = { - .name = "usbbcm", - .probe = usbbcm_device_probe, - .disconnect = usbbcm_disconnect, - .suspend = InterfaceSuspend, - .resume = InterfaceResume, - .id_table = InterfaceUsbtable, - .supports_autosuspend = 1, -}; - -struct class *bcm_class; - -static __init int bcm_init(void) -{ - int retval; - - pr_info("%s: %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION); - pr_info("%s\n", DRV_COPYRIGHT); - - bcm_class = class_create(THIS_MODULE, DRV_NAME); - if (IS_ERR(bcm_class)) { - pr_err(DRV_NAME ": could not create class\n"); - return PTR_ERR(bcm_class); - } - - retval = usb_register(&usbbcm_driver); - if (retval < 0) { - pr_err(DRV_NAME ": could not register usb driver\n"); - class_destroy(bcm_class); - return retval; - } - return 0; -} - -static __exit void bcm_exit(void) -{ - usb_deregister(&usbbcm_driver); - class_destroy(bcm_class); -} - -module_init(bcm_init); -module_exit(bcm_exit); - -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_VERSION(DRV_VERSION); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h deleted file mode 100644 index 
ffa6e9667ec4..000000000000 --- a/drivers/staging/bcm/InterfaceInit.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef _INTERFACE_INIT_H -#define _INTERFACE_INIT_H - -#define BCM_USB_VENDOR_ID_T3 0x198f -#define BCM_USB_VENDOR_ID_FOXCONN 0x0489 -#define BCM_USB_VENDOR_ID_ZTE 0x19d2 - -#define BCM_USB_PRODUCT_ID_T3 0x0300 -#define BCM_USB_PRODUCT_ID_T3B 0x0210 -#define BCM_USB_PRODUCT_ID_T3L 0x0220 -#define BCM_USB_PRODUCT_ID_SYM 0x15E -#define BCM_USB_PRODUCT_ID_1901 0xe017 -#define BCM_USB_PRODUCT_ID_226 0x0132 /* not sure if this is valid */ -#define BCM_USB_PRODUCT_ID_ZTE_226 0x172 -#define BCM_USB_PRODUCT_ID_ZTE_326 0x173 /* ZTE AX326 */ -#define BCM_USB_PRODUCT_ID_ZTE_TU25 0x0007 - -#define BCM_USB_MINOR_BASE 192 - -int InterfaceInitialize(void); - -int InterfaceExit(void); - -int usbbcm_worker_thread(struct bcm_interface_adapter *psIntfAdapter); - -#endif diff --git a/drivers/staging/bcm/InterfaceIsr.c b/drivers/staging/bcm/InterfaceIsr.c deleted file mode 100644 index b9f8a7aa24fe..000000000000 --- a/drivers/staging/bcm/InterfaceIsr.c +++ /dev/null @@ -1,190 +0,0 @@ -#include "headers.h" - - -static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/) -{ - int status = urb->status; - struct bcm_interface_adapter *psIntfAdapter = - (struct bcm_interface_adapter *)urb->context; - struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter; - - if (netif_msg_intr(Adapter)) - pr_info(PFX "%s: interrupt status %d\n", - Adapter->dev->name, status); - - if (Adapter->device_removed) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, "Device has Got Removed."); - return; - } - - if ((Adapter->bPreparingForLowPowerMode && Adapter->bDoSuspend) || - psIntfAdapter->bSuspended || - psIntfAdapter->bPreparingForBusSuspend) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, - "Interrupt call back is called while suspending the device"); - return; - } - - switch (status) { - /* success */ - case STATUS_SUCCESS: - if (urb->actual_length) { - - if (psIntfAdapter->ulInterruptData[1] & 0xFF) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - INTF_INIT, DBG_LVL_ALL, - "Got USIM interrupt"); - } - - if (psIntfAdapter->ulInterruptData[1] & 0xFF00) { - atomic_set(&Adapter->CurrNumFreeTxDesc, - (psIntfAdapter->ulInterruptData[1] & - 0xFF00) >> 8); - atomic_set(&Adapter->uiMBupdate, TRUE); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - INTF_INIT, DBG_LVL_ALL, - "TX mailbox contains %d", - atomic_read(&Adapter->CurrNumFreeTxDesc)); - } - if (psIntfAdapter->ulInterruptData[1] >> 16) { - Adapter->CurrNumRecvDescs = - (psIntfAdapter->ulInterruptData[1] >> 16); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - INTF_INIT, DBG_LVL_ALL, - "RX mailbox contains %d", - Adapter->CurrNumRecvDescs); - InterfaceRx(psIntfAdapter); - } - if (Adapter->fw_download_done && - !Adapter->downloadDDR && - atomic_read(&Adapter->CurrNumFreeTxDesc)) { - - psIntfAdapter->psAdapter->downloadDDR += 1; - wake_up(&Adapter->tx_packet_wait_queue); - } - if (!Adapter->waiting_to_fw_download_done) { - Adapter->waiting_to_fw_download_done = TRUE; - wake_up(&Adapter->ioctl_fw_dnld_wait_queue); - } - if (!atomic_read(&Adapter->TxPktAvail)) { - atomic_set(&Adapter->TxPktAvail, 1); - wake_up(&Adapter->tx_packet_wait_queue); - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, "Firing interrupt in URB"); - } - break; - case -ENOENT: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, "URB has got disconnected...."); - return; - case -EINPROGRESS: - /* - * This situation may 
happened when URBunlink is used. for - * detail check usb_unlink_urb documentation. - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, - "Impossibe condition has occurred... something very bad is going on"); - break; - /* return; */ - case -EPIPE: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, - "Interrupt IN endPoint has got halted/stalled...need to clear this"); - Adapter->bEndPointHalted = TRUE; - wake_up(&Adapter->tx_packet_wait_queue); - urb->status = STATUS_SUCCESS; - return; - /* software-driven interface shutdown */ - case -ECONNRESET: /* URB got unlinked */ - case -ESHUTDOWN: /* hardware gone. this is the serious problem */ - /* - * Occurs only when something happens with the - * host controller device - */ - case -ENODEV: /* Device got removed */ - case -EINVAL: - /* - * Some thing very bad happened with the URB. No - * description is available. - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, "interrupt urb error %d", status); - urb->status = STATUS_SUCCESS; - break; - /* return; */ - default: - /* - * This is required to check what is the defaults conditions - * when it occurs.. - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, - "GOT DEFAULT INTERRUPT URB STATUS :%d..Please Analyze it...", - status); - break; - } - - StartInterruptUrb(psIntfAdapter); - - -} - -int CreateInterruptUrb(struct bcm_interface_adapter *psIntfAdapter) -{ - psIntfAdapter->psInterruptUrb = usb_alloc_urb(0, GFP_KERNEL); - if (!psIntfAdapter->psInterruptUrb) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, - INTF_INIT, DBG_LVL_ALL, - "Cannot allocate interrupt urb"); - return -ENOMEM; - } - psIntfAdapter->psInterruptUrb->transfer_buffer = - psIntfAdapter->ulInterruptData; - psIntfAdapter->psInterruptUrb->transfer_buffer_length = - sizeof(psIntfAdapter->ulInterruptData); - - psIntfAdapter->sIntrIn.int_in_pipe = usb_rcvintpipe(psIntfAdapter->udev, - psIntfAdapter->sIntrIn.int_in_endpointAddr); - - usb_fill_int_urb(psIntfAdapter->psInterruptUrb, psIntfAdapter->udev, - psIntfAdapter->sIntrIn.int_in_pipe, - psIntfAdapter->psInterruptUrb->transfer_buffer, - psIntfAdapter->psInterruptUrb->transfer_buffer_length, - read_int_callback, psIntfAdapter, - psIntfAdapter->sIntrIn.int_in_interval); - - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, "Interrupt Interval: %d\n", - psIntfAdapter->sIntrIn.int_in_interval); - return 0; -} - - -INT StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter) -{ - INT status = 0; - - if (!(psIntfAdapter->psAdapter->device_removed || - psIntfAdapter->psAdapter->bEndPointHalted || - psIntfAdapter->bSuspended || - psIntfAdapter->bPreparingForBusSuspend || - psIntfAdapter->psAdapter->StopAllXaction)) { - status = - usb_submit_urb(psIntfAdapter->psInterruptUrb, GFP_ATOMIC); - if (status) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, - "Cannot send inturb %d\n", status); - if (status == -EPIPE) { - psIntfAdapter->psAdapter->bEndPointHalted = - TRUE; - wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue); - } - } - } - return status; -} - diff --git a/drivers/staging/bcm/InterfaceIsr.h b/drivers/staging/bcm/InterfaceIsr.h deleted file mode 100644 index 3073bd71cfeb..000000000000 --- a/drivers/staging/bcm/InterfaceIsr.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _INTERFACE_ISR_H -#define _INTERFACE_ISR_H - -int CreateInterruptUrb(struct bcm_interface_adapter *psIntfAdapter); - - -INT 
StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter); - - -VOID InterfaceEnableInterrupt(struct bcm_mini_adapter *Adapter); - -VOID InterfaceDisableInterrupt(struct bcm_mini_adapter *Adapter); - -#endif - diff --git a/drivers/staging/bcm/InterfaceMacros.h b/drivers/staging/bcm/InterfaceMacros.h deleted file mode 100644 index fedb79437f33..000000000000 --- a/drivers/staging/bcm/InterfaceMacros.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _INTERFACE_MACROS_H -#define _INTERFACE_MACROS_H - -#define BCM_USB_MAX_READ_LENGTH 2048 - -#define MAXIMUM_USB_TCB 128 -#define MAXIMUM_USB_RCB 128 - -#define MAX_BUFFERS_PER_QUEUE 256 - -#define MAX_DATA_BUFFER_SIZE 2048 - -/* Num of Asynchronous reads pending */ -#define NUM_RX_DESC 64 - -#define SYS_CFG 0x0F000C00 - -#endif diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c deleted file mode 100644 index 1f31b8f05884..000000000000 --- a/drivers/staging/bcm/InterfaceMisc.c +++ /dev/null @@ -1,246 +0,0 @@ -#include "headers.h" - -static int adapter_err_occurred(const struct bcm_interface_adapter *ad) -{ - if (ad->psAdapter->device_removed == TRUE) { - BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_PRINTK, 0, 0, - "Device got removed"); - return -ENODEV; - } - - if ((ad->psAdapter->StopAllXaction == TRUE) && - (ad->psAdapter->chip_id >= T3LPB)) { - BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_OTHERS, RDM, - DBG_LVL_ALL, - "Currently Xaction is not allowed on the bus"); - return -EACCES; - } - - if (ad->bSuspended == TRUE || ad->bPreparingForBusSuspend == TRUE) { - BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_OTHERS, RDM, - DBG_LVL_ALL, - "Bus is in suspended states hence RDM not allowed.."); - return -EACCES; - } - - return 0; -} - -int InterfaceRDM(struct bcm_interface_adapter *psIntfAdapter, - unsigned int addr, - void *buff, - int len) -{ - int bytes; - int err = 0; - - if (!psIntfAdapter) - return -EINVAL; - - err = adapter_err_occurred(psIntfAdapter); - if (err) - return err; - - psIntfAdapter->psAdapter->DeviceAccess = TRUE; - - bytes = usb_control_msg(psIntfAdapter->udev, - usb_rcvctrlpipe(psIntfAdapter->udev, 0), - 0x02, - 0xC2, - (addr & 0xFFFF), - ((addr >> 16) & 0xFFFF), - buff, - len, - 5000); - - if (-ENODEV == bytes) - psIntfAdapter->psAdapter->device_removed = TRUE; - - if (bytes < 0) - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, - DBG_LVL_ALL, "RDM failed status :%d", bytes); - else - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, - DBG_LVL_ALL, "RDM sent %d", bytes); - - psIntfAdapter->psAdapter->DeviceAccess = false; - return bytes; -} - -int InterfaceWRM(struct bcm_interface_adapter *psIntfAdapter, - unsigned int addr, - void *buff, - int len) -{ - int retval = 0; - int err = 0; - - if (!psIntfAdapter) - return -EINVAL; - - err = adapter_err_occurred(psIntfAdapter); - if (err) - return err; - - psIntfAdapter->psAdapter->DeviceAccess = TRUE; - - retval = usb_control_msg(psIntfAdapter->udev, - usb_sndctrlpipe(psIntfAdapter->udev, 0), - 0x01, - 0x42, - (addr & 0xFFFF), - ((addr >> 16) & 0xFFFF), - buff, - len, - 5000); - - if (-ENODEV == retval) - psIntfAdapter->psAdapter->device_removed = TRUE; - - if (retval < 0) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM, - DBG_LVL_ALL, "WRM failed status :%d", retval); - psIntfAdapter->psAdapter->DeviceAccess = false; - return retval; - } - psIntfAdapter->psAdapter->DeviceAccess = false; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM, - DBG_LVL_ALL, "WRM sent %d", retval); - return STATUS_SUCCESS; -} 
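[Note on the register-access path removed above: InterfaceRDM()/InterfaceWRM() are the driver's only route to the chip's register space. Each call is a synchronous vendor control transfer on endpoint 0, with the 32-bit register address split across wValue (low half) and wIndex (high half) and a 5-second timeout; reads use request 0x02 with request type 0xC2, writes use 0x01 with 0x42 on the send pipe. A minimal, self-contained sketch of the read side is given below. The helper name bcm_reg_read() and the kmalloc() bounce buffer are assumptions for illustration only, not code from this patch.]

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb.h>

static int bcm_reg_read(struct usb_device *udev, u32 addr, void *dst, u16 len)
{
	void *buf;
	int ret;

	/* usb_control_msg() needs DMA-able memory, so bounce through kmalloc(). */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x02,	/* vendor request used by the deleted InterfaceRDM() */
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, /* 0xC2 */
			      addr & 0xFFFF,		/* wValue: low 16 bits of register address */
			      (addr >> 16) & 0xFFFF,	/* wIndex: high 16 bits */
			      buf, len, 5000);		/* 5 s timeout, as in the original */
	if (ret > 0)
		memcpy(dst, buf, ret);

	kfree(buf);
	return ret;	/* bytes transferred on success, negative errno on failure */
}

[The write side is symmetric in the removed code: usb_sndctrlpipe() with request 0x01 and type 0x42, returning STATUS_SUCCESS once the transfer completes.]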
- -int BcmRDM(void *arg, - unsigned int addr, - void *buff, - int len) -{ - return InterfaceRDM((struct bcm_interface_adapter *)arg, addr, buff, - len); -} - -int BcmWRM(void *arg, - unsigned int addr, - void *buff, - int len) -{ - return InterfaceWRM((struct bcm_interface_adapter *)arg, addr, buff, - len); -} - -int Bcm_clear_halt_of_endpoints(struct bcm_mini_adapter *Adapter) -{ - struct bcm_interface_adapter *psIntfAdapter = - (struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter); - int status = STATUS_SUCCESS; - - /* - * usb_clear_halt - tells device to clear endpoint halt/stall condition - * @dev: device whose endpoint is halted - * @pipe: endpoint "pipe" being cleared - * @ Context: !in_interrupt () - * - * usb_clear_halt is the synchrnous call and returns 0 on success else - * returns with error code. - * This is used to clear halt conditions for bulk and interrupt - * endpoints only. - * Control and isochronous endpoints never halts. - * - * Any URBs queued for such an endpoint should normally be unlinked by - * the driver before clearing the halt condition. - * - */ - - /* Killing all the submitted urbs to different end points. */ - Bcm_kill_all_URBs(psIntfAdapter); - - /* clear the halted/stalled state for every end point */ - status = usb_clear_halt(psIntfAdapter->udev, - psIntfAdapter->sIntrIn.int_in_pipe); - if (status != STATUS_SUCCESS) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, - "Unable to Clear Halt of Interrupt IN end point. :%d ", - status); - - status = usb_clear_halt(psIntfAdapter->udev, - psIntfAdapter->sBulkIn.bulk_in_pipe); - if (status != STATUS_SUCCESS) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, - "Unable to Clear Halt of Bulk IN end point. :%d ", - status); - - status = usb_clear_halt(psIntfAdapter->udev, - psIntfAdapter->sBulkOut.bulk_out_pipe); - if (status != STATUS_SUCCESS) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT, - DBG_LVL_ALL, - "Unable to Clear Halt of Bulk OUT end point. :%d ", - status); - - return status; -} - -void Bcm_kill_all_URBs(struct bcm_interface_adapter *psIntfAdapter) -{ - struct urb *tempUrb = NULL; - unsigned int i; - - /* - * usb_kill_urb - cancel a transfer request and wait for it to finish - * @urb: pointer to URB describing a previously submitted request, - * returns nothing as it is void returned API. - * - * This routine cancels an in-progress request. It is guaranteed that - * upon return all completion handlers will have finished and the URB - * will be totally idle and available for reuse - * - * This routine may not be used in an interrupt context (such as a - * bottom half or a completion handler), or when holding a spinlock, or - * in other situations where the caller can't schedule(). 
- * - */ - - /* Cancel submitted Interrupt-URB's */ - if (psIntfAdapter->psInterruptUrb) { - if (psIntfAdapter->psInterruptUrb->status == -EINPROGRESS) - usb_kill_urb(psIntfAdapter->psInterruptUrb); - } - - /* Cancel All submitted TX URB's */ - for (i = 0; i < MAXIMUM_USB_TCB; i++) { - tempUrb = psIntfAdapter->asUsbTcb[i].urb; - if (tempUrb) { - if (tempUrb->status == -EINPROGRESS) - usb_kill_urb(tempUrb); - } - } - - for (i = 0; i < MAXIMUM_USB_RCB; i++) { - tempUrb = psIntfAdapter->asUsbRcb[i].urb; - if (tempUrb) { - if (tempUrb->status == -EINPROGRESS) - usb_kill_urb(tempUrb); - } - } - - atomic_set(&psIntfAdapter->uNumTcbUsed, 0); - atomic_set(&psIntfAdapter->uCurrTcb, 0); - - atomic_set(&psIntfAdapter->uNumRcbUsed, 0); - atomic_set(&psIntfAdapter->uCurrRcb, 0); -} - -void putUsbSuspend(struct work_struct *work) -{ - struct bcm_interface_adapter *psIntfAdapter = NULL; - struct usb_interface *intf = NULL; - - psIntfAdapter = container_of(work, struct bcm_interface_adapter, - usbSuspendWork); - intf = psIntfAdapter->interface; - - if (psIntfAdapter->bSuspended == false) - usb_autopm_put_interface(intf); -} - diff --git a/drivers/staging/bcm/InterfaceMisc.h b/drivers/staging/bcm/InterfaceMisc.h deleted file mode 100644 index 0e5e38b26329..000000000000 --- a/drivers/staging/bcm/InterfaceMisc.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef __INTERFACE_MISC_H -#define __INTERFACE_MISC_H - -INT -InterfaceRDM(struct bcm_interface_adapter *psIntfAdapter, - UINT addr, - PVOID buff, - INT len); - -INT -InterfaceWRM(struct bcm_interface_adapter *psIntfAdapter, - UINT addr, - PVOID buff, - INT len); - - -int InterfaceFileDownload(PVOID psIntfAdapter, - struct file *flp, - unsigned int on_chip_loc); - -int InterfaceFileReadbackFromChip(PVOID psIntfAdapter, - struct file *flp, - unsigned int on_chip_loc); - - -int BcmRDM(PVOID arg, - UINT addr, - PVOID buff, - INT len); - -int BcmWRM(PVOID arg, - UINT addr, - PVOID buff, - INT len); - -INT Bcm_clear_halt_of_endpoints(struct bcm_mini_adapter *Adapter); - -VOID Bcm_kill_all_URBs(struct bcm_interface_adapter *psIntfAdapter); - -#define DISABLE_USB_ZERO_LEN_INT 0x0F011878 - -#endif /* __INTERFACE_MISC_H */ diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c deleted file mode 100644 index 0f179b9382d3..000000000000 --- a/drivers/staging/bcm/InterfaceRx.c +++ /dev/null @@ -1,289 +0,0 @@ -#include "headers.h" - -static void handle_control_packet(struct bcm_interface_adapter *interface, - struct bcm_mini_adapter *ad, - struct bcm_leader *leader, - struct sk_buff *skb, - struct urb *urb) -{ - BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, - "Received control pkt..."); - *(PUSHORT)skb->data = leader->Status; - memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer + - (sizeof(struct bcm_leader)), leader->PLength); - skb->len = leader->PLength + sizeof(USHORT); - - spin_lock(&ad->control_queue_lock); - ENQUEUEPACKET(ad->RxControlHead, ad->RxControlTail, skb); - spin_unlock(&ad->control_queue_lock); - - atomic_inc(&ad->cntrlpktCnt); - wake_up(&ad->process_rx_cntrlpkt); -} - -static void format_eth_hdr_to_stack(struct bcm_interface_adapter *interface, - struct bcm_mini_adapter *ad, - struct bcm_leader *p_leader, - struct sk_buff *skb, - struct urb *urb, - UINT ui_index, - int queue_index, - bool b_header_supression_endabled) -{ - /* - * Data Packet, Format a proper Ethernet Header - * and give it to the stack - */ - BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_DATA, - DBG_LVL_ALL, "Received Data pkt..."); 
- skb_reserve(skb, 2 + SKB_RESERVE_PHS_BYTES); - memcpy(skb->data+ETH_HLEN, (PUCHAR)urb->transfer_buffer + - sizeof(struct bcm_leader), p_leader->PLength); - skb->dev = ad->dev; - - /* currently skb->len has extra ETH_HLEN bytes in the beginning */ - skb_put(skb, p_leader->PLength + ETH_HLEN); - ad->PackInfo[queue_index].uiTotalRxBytes += p_leader->PLength; - ad->PackInfo[queue_index].uiThisPeriodRxBytes += p_leader->PLength; - BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_DATA, - DBG_LVL_ALL, "Received Data pkt of len :0x%X", - p_leader->PLength); - - if (netif_running(ad->dev)) { - /* Moving ahead by ETH_HLEN to the data ptr as received from FW */ - skb_pull(skb, ETH_HLEN); - PHSReceive(ad, p_leader->Vcid, skb, &skb->len, - NULL, b_header_supression_endabled); - - if (!ad->PackInfo[queue_index].bEthCSSupport) { - skb_push(skb, ETH_HLEN); - - memcpy(skb->data, skb->dev->dev_addr, 6); - memcpy(skb->data+6, skb->dev->dev_addr, 6); - (*(skb->data+11))++; - *(skb->data+12) = 0x08; - *(skb->data+13) = 0x00; - p_leader->PLength += ETH_HLEN; - } - - skb->protocol = eth_type_trans(skb, ad->dev); - netif_rx(skb); - } else { - BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, - RX_DATA, DBG_LVL_ALL, - "i/f not up hance freeing SKB..."); - dev_kfree_skb(skb); - } - - ++ad->dev->stats.rx_packets; - ad->dev->stats.rx_bytes += p_leader->PLength; - - for (ui_index = 0; ui_index < MIBS_MAX_HIST_ENTRIES; ui_index++) { - if ((p_leader->PLength <= - MIBS_PKTSIZEHIST_RANGE*(ui_index+1)) && - (p_leader->PLength > MIBS_PKTSIZEHIST_RANGE*(ui_index))) - - ad->aRxPktSizeHist[ui_index]++; - } -} - -static int SearchVcid(struct bcm_mini_adapter *Adapter, unsigned short usVcid) -{ - int iIndex = 0; - - for (iIndex = (NO_OF_QUEUES-1); iIndex >= 0; iIndex--) - if (Adapter->PackInfo[iIndex].usVCID_Value == usVcid) - return iIndex; - return NO_OF_QUEUES+1; - -} - - -static struct bcm_usb_rcb * -GetBulkInRcb(struct bcm_interface_adapter *psIntfAdapter) -{ - struct bcm_usb_rcb *pRcb = NULL; - UINT index = 0; - - if ((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) && - (psIntfAdapter->psAdapter->StopAllXaction == false)) { - index = atomic_read(&psIntfAdapter->uCurrRcb); - pRcb = &psIntfAdapter->asUsbRcb[index]; - pRcb->bUsed = TRUE; - pRcb->psIntfAdapter = psIntfAdapter; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC, - DBG_LVL_ALL, "Got Rx desc %d used %d", index, - atomic_read(&psIntfAdapter->uNumRcbUsed)); - index = (index + 1) % MAXIMUM_USB_RCB; - atomic_set(&psIntfAdapter->uCurrRcb, index); - atomic_inc(&psIntfAdapter->uNumRcbUsed); - } - return pRcb; -} - -/*this is receive call back - when pkt available for receive (BULK IN- end point)*/ -static void read_bulk_callback(struct urb *urb) -{ - struct sk_buff *skb = NULL; - bool bHeaderSupressionEnabled = false; - int QueueIndex = NO_OF_QUEUES + 1; - UINT uiIndex = 0; - struct bcm_usb_rcb *pRcb = (struct bcm_usb_rcb *)urb->context; - struct bcm_interface_adapter *psIntfAdapter = pRcb->psIntfAdapter; - struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter; - struct bcm_leader *pLeader = urb->transfer_buffer; - - if (unlikely(netif_msg_rx_status(Adapter))) - pr_info(PFX "%s: rx urb status %d length %d\n", - Adapter->dev->name, urb->status, urb->actual_length); - - if ((Adapter->device_removed == TRUE) || - (TRUE == Adapter->bEndPointHalted) || - (0 == urb->actual_length)) { - pRcb->bUsed = false; - atomic_dec(&psIntfAdapter->uNumRcbUsed); - return; - } - - if (urb->status != STATUS_SUCCESS) { - if (urb->status == -EPIPE) { - 
Adapter->bEndPointHalted = TRUE; - wake_up(&Adapter->tx_packet_wait_queue); - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, - DBG_LVL_ALL, - "Rx URB has got cancelled. status :%d", - urb->status); - } - pRcb->bUsed = false; - atomic_dec(&psIntfAdapter->uNumRcbUsed); - urb->status = STATUS_SUCCESS; - return; - } - - if (Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, - "device is going in low power mode while PMU option selected..hence rx packet should not be process"); - return; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, - "Read back done len %d\n", pLeader->PLength); - if (!pLeader->PLength) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, - "Leader Length 0"); - atomic_dec(&psIntfAdapter->uNumRcbUsed); - return; - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, - "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", - pLeader->Status, pLeader->PLength, pLeader->Vcid); - if (MAX_CNTL_PKT_SIZE < pLeader->PLength) { - if (netif_msg_rx_err(Adapter)) - pr_info(PFX "%s: corrupted leader length...%d\n", - Adapter->dev->name, pLeader->PLength); - ++Adapter->dev->stats.rx_dropped; - atomic_dec(&psIntfAdapter->uNumRcbUsed); - return; - } - - QueueIndex = SearchVcid(Adapter, pLeader->Vcid); - if (QueueIndex < NO_OF_QUEUES) { - bHeaderSupressionEnabled = - Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled; - bHeaderSupressionEnabled = - bHeaderSupressionEnabled & Adapter->bPHSEnabled; - } - - skb = dev_alloc_skb(pLeader->PLength + SKB_RESERVE_PHS_BYTES + - SKB_RESERVE_ETHERNET_HEADER); - if (!skb) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, - "NO SKBUFF!!! Dropping the Packet"); - atomic_dec(&psIntfAdapter->uNumRcbUsed); - return; - } - /* If it is a control Packet, then call handle_bcm_packet ()*/ - if ((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) || - (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F))) { - handle_control_packet(psIntfAdapter, Adapter, pLeader, skb, - urb); - } else { - format_eth_hdr_to_stack(psIntfAdapter, Adapter, pLeader, skb, - urb, uiIndex, QueueIndex, - bHeaderSupressionEnabled); - } - Adapter->PrevNumRecvDescs++; - pRcb->bUsed = false; - atomic_dec(&psIntfAdapter->uNumRcbUsed); -} - -static int ReceiveRcb(struct bcm_interface_adapter *psIntfAdapter, - struct bcm_usb_rcb *pRcb) -{ - struct urb *urb = pRcb->urb; - int retval = 0; - - usb_fill_bulk_urb(urb, psIntfAdapter->udev, - usb_rcvbulkpipe(psIntfAdapter->udev, - psIntfAdapter->sBulkIn.bulk_in_endpointAddr), - urb->transfer_buffer, - BCM_USB_MAX_READ_LENGTH, - read_bulk_callback, pRcb); - - if (false == psIntfAdapter->psAdapter->device_removed && - false == psIntfAdapter->psAdapter->bEndPointHalted && - false == psIntfAdapter->bSuspended && - false == psIntfAdapter->bPreparingForBusSuspend) { - retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, - RX_DPC, DBG_LVL_ALL, - "failed submitting read urb, error %d", - retval); - /* if this return value is because of pipe halt. need to clear this. */ - if (retval == -EPIPE) { - psIntfAdapter->psAdapter->bEndPointHalted = TRUE; - wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue); - } - - } - } - return retval; -} - -/* -Function: InterfaceRx - -Description: This is the hardware specific Function for Receiving - data packet/control packets from the device. 
- -Input parameters: IN struct bcm_mini_adapter *Adapter - Miniport Adapter Context - - - -Return: TRUE - If Rx was successful. - Other - If an error occurred. -*/ - -bool InterfaceRx(struct bcm_interface_adapter *psIntfAdapter) -{ - USHORT RxDescCount = NUM_RX_DESC - - atomic_read(&psIntfAdapter->uNumRcbUsed); - - struct bcm_usb_rcb *pRcb = NULL; - - while (RxDescCount) { - pRcb = GetBulkInRcb(psIntfAdapter); - if (pRcb == NULL) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, - DBG_TYPE_PRINTK, 0, 0, - "Unable to get Rcb pointer"); - return false; - } - ReceiveRcb(psIntfAdapter, pRcb); - RxDescCount--; - } - return TRUE; -} - diff --git a/drivers/staging/bcm/InterfaceRx.h b/drivers/staging/bcm/InterfaceRx.h deleted file mode 100644 index b4e858bcda34..000000000000 --- a/drivers/staging/bcm/InterfaceRx.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _INTERFACE_RX_H -#define _INTERFACE_RX_H - -bool InterfaceRx(struct bcm_interface_adapter *Adapter); - -#endif - diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c deleted file mode 100644 index 9b3f64b821ed..000000000000 --- a/drivers/staging/bcm/InterfaceTx.c +++ /dev/null @@ -1,213 +0,0 @@ -#include "headers.h" - -static void prepare_low_power_mode(struct urb *urb, - struct bcm_interface_adapter *interface, - struct bcm_mini_adapter *ps_adapter, - struct bcm_mini_adapter *ad, - struct bcm_link_request *p_control_msg, - bool *b_power_down_msg) -{ - if (((p_control_msg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) && - (p_control_msg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE))) { - - *b_power_down_msg = TRUE; - /* - * This covers the bus err while Idle Request msg - * sent down. - */ - if (urb->status != STATUS_SUCCESS) { - ps_adapter->bPreparingForLowPowerMode = false; - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND, - DBG_LVL_ALL, - "Idle Mode Request msg failed to reach to Modem"); - /* Signalling the cntrl pkt path in Ioctl */ - wake_up(&ps_adapter->lowpower_mode_wait_queue); - StartInterruptUrb(interface); - return; - } - - if (ps_adapter->bDoSuspend == false) { - ps_adapter->IdleMode = TRUE; - /* since going in Idle mode completed hence making this var false */ - ps_adapter->bPreparingForLowPowerMode = false; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND, - DBG_LVL_ALL, - "Host Entered in Idle Mode State..."); - /* Signalling the cntrl pkt path in Ioctl*/ - wake_up(&ps_adapter->lowpower_mode_wait_queue); - } - - } else if ((p_control_msg->Leader.Status == LINK_UP_CONTROL_REQ) && - (p_control_msg->szData[0] == LINK_UP_ACK) && - (p_control_msg->szData[1] == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) && - (p_control_msg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER)) { - /* - * This covers the bus err while shutdown Request - * msg sent down. 
- */ - if (urb->status != STATUS_SUCCESS) { - ps_adapter->bPreparingForLowPowerMode = false; - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND, - DBG_LVL_ALL, - "Shutdown Request Msg failed to reach to Modem"); - /* Signalling the cntrl pkt path in Ioctl */ - wake_up(&ps_adapter->lowpower_mode_wait_queue); - StartInterruptUrb(interface); - return; - } - - *b_power_down_msg = TRUE; - if (ps_adapter->bDoSuspend == false) { - ps_adapter->bShutStatus = TRUE; - /* - * since going in shutdown mode completed hence - * making this var false - */ - ps_adapter->bPreparingForLowPowerMode = false; - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND, - DBG_LVL_ALL, - "Host Entered in shutdown Mode State..."); - /* Signalling the cntrl pkt path in Ioctl */ - wake_up(&ps_adapter->lowpower_mode_wait_queue); - } - } - - if (ps_adapter->bDoSuspend && *b_power_down_msg) { - /* issuing bus suspend request */ - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, - "Issuing the Bus suspend request to USB stack"); - interface->bPreparingForBusSuspend = TRUE; - schedule_work(&interface->usbSuspendWork); - } -} - -/*this is transmit call-back(BULK OUT)*/ -static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/) -{ - struct bcm_usb_tcb *pTcb = (struct bcm_usb_tcb *)urb->context; - struct bcm_interface_adapter *psIntfAdapter = pTcb->psIntfAdapter; - struct bcm_link_request *pControlMsg = - (struct bcm_link_request *)urb->transfer_buffer; - struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter; - bool bpowerDownMsg = false; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (unlikely(netif_msg_tx_done(Adapter))) - pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, - urb->status); - - if (urb->status != STATUS_SUCCESS) { - if (urb->status == -EPIPE) { - psIntfAdapter->psAdapter->bEndPointHalted = TRUE; - wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue); - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, - DBG_LVL_ALL, - "Tx URB has got cancelled. 
status :%d", - urb->status); - } - } - - pTcb->bUsed = false; - atomic_dec(&psIntfAdapter->uNumTcbUsed); - - if (TRUE == psAdapter->bPreparingForLowPowerMode) { - prepare_low_power_mode(urb, psIntfAdapter, psAdapter, Adapter, - pControlMsg, &bpowerDownMsg); - } - - usb_free_coherent(urb->dev, urb->transfer_buffer_length, - urb->transfer_buffer, urb->transfer_dma); -} - - -static struct bcm_usb_tcb *GetBulkOutTcb(struct bcm_interface_adapter *psIntfAdapter) -{ - struct bcm_usb_tcb *pTcb = NULL; - UINT index = 0; - - if ((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) && - (psIntfAdapter->psAdapter->StopAllXaction == false)) { - index = atomic_read(&psIntfAdapter->uCurrTcb); - pTcb = &psIntfAdapter->asUsbTcb[index]; - pTcb->bUsed = TRUE; - pTcb->psIntfAdapter = psIntfAdapter; - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, - NEXT_SEND, DBG_LVL_ALL, - "Got Tx desc %d used %d", - index, - atomic_read(&psIntfAdapter->uNumTcbUsed)); - index = (index + 1) % MAXIMUM_USB_TCB; - atomic_set(&psIntfAdapter->uCurrTcb, index); - atomic_inc(&psIntfAdapter->uNumTcbUsed); - } - return pTcb; -} - -static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter, - struct bcm_usb_tcb *pTcb, PVOID data, int len) -{ - - struct urb *urb = pTcb->urb; - int retval = 0; - - urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len, - GFP_ATOMIC, &urb->transfer_dma); - if (!urb->transfer_buffer) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, - "Error allocating memory\n"); - return -ENOMEM; - } - memcpy(urb->transfer_buffer, data, len); - urb->transfer_buffer_length = len; - - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND, - DBG_LVL_ALL, "Sending Bulk out packet\n"); - /* For T3B,INT OUT end point will be used as bulk out end point */ - if ((psIntfAdapter->psAdapter->chip_id == T3B) && - (psIntfAdapter->bHighSpeedDevice == TRUE)) { - usb_fill_int_urb(urb, psIntfAdapter->udev, - psIntfAdapter->sBulkOut.bulk_out_pipe, - urb->transfer_buffer, len, write_bulk_callback, pTcb, - psIntfAdapter->sBulkOut.int_out_interval); - } else { - usb_fill_bulk_urb(urb, psIntfAdapter->udev, - psIntfAdapter->sBulkOut.bulk_out_pipe, - urb->transfer_buffer, len, write_bulk_callback, pTcb); - } - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* For DMA transfer */ - - if (false == psIntfAdapter->psAdapter->device_removed && - false == psIntfAdapter->psAdapter->bEndPointHalted && - false == psIntfAdapter->bSuspended && - false == psIntfAdapter->bPreparingForBusSuspend) { - retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, - NEXT_SEND, DBG_LVL_ALL, - "failed submitting write urb, error %d", - retval); - if (retval == -EPIPE) { - psIntfAdapter->psAdapter->bEndPointHalted = TRUE; - wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue); - } - } - } - return retval; -} - -int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len) -{ - struct bcm_usb_tcb *pTcb = NULL; - struct bcm_interface_adapter *psIntfAdapter = arg; - - pTcb = GetBulkOutTcb(psIntfAdapter); - if (pTcb == NULL) { - BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, - "No URB to transmit packet, dropping packet"); - return -EFAULT; - } - return TransmitTcb(psIntfAdapter, pTcb, data, len); -} - diff --git a/drivers/staging/bcm/InterfaceTx.h b/drivers/staging/bcm/InterfaceTx.h deleted file mode 100644 index 273147577c17..000000000000 --- a/drivers/staging/bcm/InterfaceTx.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef 
_INTERFACE_TX_H -#define _INTERFACE_TX_H - -INT InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len); - -#endif - diff --git a/drivers/staging/bcm/Ioctl.h b/drivers/staging/bcm/Ioctl.h deleted file mode 100644 index fa5f8671612e..000000000000 --- a/drivers/staging/bcm/Ioctl.h +++ /dev/null @@ -1,226 +0,0 @@ -#ifndef _IOCTL_H_ -#define _IOCTL_H_ - -struct bcm_rdm_buffer { - unsigned long Register; - unsigned long Length; -} __packed; - -struct bcm_wrm_buffer { - unsigned long Register; - unsigned long Length; - unsigned char Data[4]; -} __packed; - -struct bcm_ioctl_buffer { - void __user *InputBuffer; - unsigned long InputLength; - void __user *OutputBuffer; - unsigned long OutputLength; -} __packed; - -struct bcm_gpio_info { - unsigned int uiGpioNumber; /* valid numbers 0-15 */ - unsigned int uiGpioValue; /* 1 set ; 0 not set */ -} __packed; - -struct bcm_user_thread_req { - /* 0->Inactivate LED thread. */ - /* 1->Activate the LED thread */ - unsigned int ThreadState; -} __packed; - -#define LED_THREAD_ACTIVATION_REQ 1 -#define BCM_IOCTL 'k' -#define IOCTL_SEND_CONTROL_MESSAGE _IOW(BCM_IOCTL, 0x801, int) -#define IOCTL_BCM_REGISTER_WRITE _IOW(BCM_IOCTL, 0x802, int) -#define IOCTL_BCM_REGISTER_READ _IOR(BCM_IOCTL, 0x803, int) -#define IOCTL_BCM_COMMON_MEMORY_WRITE _IOW(BCM_IOCTL, 0x804, int) -#define IOCTL_BCM_COMMON_MEMORY_READ _IOR(BCM_IOCTL, 0x805, int) -#define IOCTL_GET_CONTROL_MESSAGE _IOR(BCM_IOCTL, 0x806, int) -#define IOCTL_BCM_FIRMWARE_DOWNLOAD _IOW(BCM_IOCTL, 0x807, int) -#define IOCTL_BCM_SET_SEND_VCID _IOW(BCM_IOCTL, 0x808, int) -#define IOCTL_BCM_SWITCH_TRANSFER_MODE _IOW(BCM_IOCTL, 0x809, int) -#define IOCTL_LINK_REQ _IOW(BCM_IOCTL, 0x80A, int) -#define IOCTL_RSSI_LEVEL_REQ _IOW(BCM_IOCTL, 0x80B, int) -#define IOCTL_IDLE_REQ _IOW(BCM_IOCTL, 0x80C, int) -#define IOCTL_SS_INFO_REQ _IOW(BCM_IOCTL, 0x80D, int) -#define IOCTL_GET_STATISTICS_POINTER _IOW(BCM_IOCTL, 0x80E, int) -#define IOCTL_CM_REQUEST _IOW(BCM_IOCTL, 0x80F, int) -#define IOCTL_INIT_PARAM_REQ _IOW(BCM_IOCTL, 0x810, int) -#define IOCTL_MAC_ADDR_REQ _IOW(BCM_IOCTL, 0x811, int) -#define IOCTL_MAC_ADDR_RESP _IOWR(BCM_IOCTL, 0x812, int) -#define IOCTL_CLASSIFICATION_RULE _IOW(BCM_IOCTL, 0x813, char) -#define IOCTL_CLOSE_NOTIFICATION _IO(BCM_IOCTL, 0x814) -#define IOCTL_LINK_UP _IO(BCM_IOCTL, 0x815) -#define IOCTL_LINK_DOWN _IO(BCM_IOCTL, 0x816, struct bcm_ioctl_buffer) -#define IOCTL_CHIP_RESET _IO(BCM_IOCTL, 0x816) -#define IOCTL_CINR_LEVEL_REQ _IOW(BCM_IOCTL, 0x817, char) -#define IOCTL_WTM_CONTROL_REQ _IOW(BCM_IOCTL, 0x817, char) -#define IOCTL_BE_BUCKET_SIZE _IOW(BCM_IOCTL, 0x818, unsigned long) -#define IOCTL_RTPS_BUCKET_SIZE _IOW(BCM_IOCTL, 0x819, unsigned long) -#define IOCTL_QOS_THRESHOLD _IOW(BCM_IOCTL, 0x820, unsigned long) -#define IOCTL_DUMP_PACKET_INFO _IO(BCM_IOCTL, 0x821) -#define IOCTL_GET_PACK_INFO _IOR(BCM_IOCTL, 0x823, int) -#define IOCTL_BCM_GET_DRIVER_VERSION _IOR(BCM_IOCTL, 0x829, int) -#define IOCTL_BCM_GET_CURRENT_STATUS _IOW(BCM_IOCTL, 0x828, int) -#define IOCTL_BCM_GPIO_SET_REQUEST _IOW(BCM_IOCTL, 0x82A, int) -#define IOCTL_BCM_GPIO_STATUS_REQUEST _IOW(BCM_IOCTL, 0x82b, int) -#define IOCTL_BCM_GET_DSX_INDICATION _IOR(BCM_IOCTL, 0x854, int) -#define IOCTL_BCM_BUFFER_DOWNLOAD_START _IOW(BCM_IOCTL, 0x855, int) -#define IOCTL_BCM_BUFFER_DOWNLOAD _IOW(BCM_IOCTL, 0x856, int) -#define IOCTL_BCM_BUFFER_DOWNLOAD_STOP _IOW(BCM_IOCTL, 0x857, int) -#define IOCTL_BCM_REGISTER_WRITE_PRIVATE _IOW(BCM_IOCTL, 0x826, char) -#define IOCTL_BCM_REGISTER_READ_PRIVATE _IOW(BCM_IOCTL, 0x827, char) -#define 
IOCTL_BCM_SET_DEBUG _IOW(BCM_IOCTL, 0x824, struct bcm_ioctl_buffer) -#define IOCTL_BCM_EEPROM_REGISTER_WRITE _IOW(BCM_IOCTL, 0x858, int) -#define IOCTL_BCM_EEPROM_REGISTER_READ _IOR(BCM_IOCTL, 0x859, int) -#define IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE _IOR(BCM_IOCTL, 0x860, int) -#define IOCTL_BCM_SET_MAC_TRACING _IOW(BCM_IOCTL, 0x82c, int) -#define IOCTL_BCM_GET_HOST_MIBS _IOW(BCM_IOCTL, 0x853, int) -#define IOCTL_BCM_NVM_READ _IOR(BCM_IOCTL, 0x861, int) -#define IOCTL_BCM_NVM_WRITE _IOW(BCM_IOCTL, 0x862, int) -#define IOCTL_BCM_GET_NVM_SIZE _IOR(BCM_IOCTL, 0x863, int) -#define IOCTL_BCM_CAL_INIT _IOR(BCM_IOCTL, 0x864, int) -#define IOCTL_BCM_BULK_WRM _IOW(BCM_IOCTL, 0x90B, int) -#define IOCTL_BCM_FLASH2X_SECTION_READ _IOR(BCM_IOCTL, 0x865, int) -#define IOCTL_BCM_FLASH2X_SECTION_WRITE _IOW(BCM_IOCTL, 0x866, int) -#define IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP _IOR(BCM_IOCTL, 0x867, int) -#define IOCTL_BCM_SET_ACTIVE_SECTION _IOW(BCM_IOCTL, 0x868, int) -#define IOCTL_BCM_IDENTIFY_ACTIVE_SECTION _IO(BCM_IOCTL, 0x869) -#define IOCTL_BCM_COPY_SECTION _IOW(BCM_IOCTL, 0x870, int) -#define IOCTL_BCM_GET_FLASH_CS_INFO _IOR(BCM_IOCTL, 0x871, int) -#define IOCTL_BCM_SELECT_DSD _IOW(BCM_IOCTL, 0x872, int) -#define IOCTL_BCM_NVM_RAW_READ _IOR(BCM_IOCTL, 0x875, int) -#define IOCTL_BCM_CNTRLMSG_MASK _IOW(BCM_IOCTL, 0x874, int) -#define IOCTL_BCM_GET_DEVICE_DRIVER_INFO _IOR(BCM_IOCTL, 0x877, int) -#define IOCTL_BCM_TIME_SINCE_NET_ENTRY _IOR(BCM_IOCTL, 0x876, int) -#define BCM_LED_THREAD_STATE_CHANGE_REQ _IOW(BCM_IOCTL, 0x878, int) -#define IOCTL_BCM_GPIO_MULTI_REQUEST _IOW(BCM_IOCTL, 0x82D, struct bcm_ioctl_buffer) -#define IOCTL_BCM_GPIO_MODE_REQUEST _IOW(BCM_IOCTL, 0x82E, struct bcm_ioctl_buffer) - -enum bcm_interface_type { - BCM_MII, - BCM_CARDBUS, - BCM_USB, - BCM_SDIO, - BCM_PCMCIA -}; - -struct bcm_driver_info { - enum bcm_nvm_type u32NVMType; - unsigned int MaxRDMBufferSize; - enum bcm_interface_type u32InterfaceType; - unsigned int u32DSDStartOffset; - unsigned int u32RxAlignmentCorrection; - unsigned int u32Reserved[10]; -}; - -struct bcm_nvm_readwrite { - void __user *pBuffer; - uint32_t uiOffset; - uint32_t uiNumBytes; - bool bVerify; -}; - -struct bcm_bulk_wrm_buffer { - unsigned long Register; - unsigned long SwapEndian; - unsigned long Values[1]; -}; - -enum bcm_flash2x_section_val { - NO_SECTION_VAL = 0, /* no section chosen when absolute offset is given for RD/WR */ - ISO_IMAGE1, - ISO_IMAGE2, - DSD0, - DSD1, - DSD2, - VSA0, - VSA1, - VSA2, - SCSI, - CONTROL_SECTION, - ISO_IMAGE1_PART2, - ISO_IMAGE1_PART3, - ISO_IMAGE2_PART2, - ISO_IMAGE2_PART3, - TOTAL_SECTIONS -}; - -/* - * Structure used for READ/WRITE Flash Map2.x - */ -struct bcm_flash2x_readwrite { - enum bcm_flash2x_section_val Section; /* section to be read/written */ - u32 offset; /* offset within section. */ - u32 numOfBytes; /* number of bytes from the offset */ - u32 bVerify; - void __user *pDataBuff; /* buffer for reading/writing */ -}; - -/* - * This structure is used for coping one section to other. - * there are two ways to copy one section to other. - * it NOB =0, complete section will be copied on to other. - * if NOB !=0, only NOB will be copied from the given offset. - */ - -struct bcm_flash2x_copy_section { - enum bcm_flash2x_section_val SrcSection; - enum bcm_flash2x_section_val DstSection; - u32 offset; - u32 numOfBytes; -}; - -/* - * This section provide the complete bitmap of the Flash. - * using this map lib/APP will issue read/write command. 
- * Fields are defined as : - * Bit [0] = section is present //1:present, 0: Not present - * Bit [1] = section is valid //1: valid, 0: not valid - * Bit [2] = Section is R/W //0: RW, 1: RO - * Bit [3] = Section is Active or not 1 means Active, 0->inactive - * Bit [7...3] = Reserved - */ - -struct bcm_flash2x_bitmap { - unsigned char ISO_IMAGE1; - unsigned char ISO_IMAGE2; - unsigned char DSD0; - unsigned char DSD1; - unsigned char DSD2; - unsigned char VSA0; - unsigned char VSA1; - unsigned char VSA2; - unsigned char SCSI; - unsigned char CONTROL_SECTION; - /* Reserved for future use */ - unsigned char Reserved0; - unsigned char Reserved1; - unsigned char Reserved2; -}; - -struct bcm_time_elapsed { - u64 ul64TimeElapsedSinceNetEntry; - u32 uiReserved[4]; -}; - -enum { - WIMAX_IDX = 0, /* To access WiMAX chip GPIO's for GPIO_MULTI_INFO or GPIO_MULTI_MODE */ - HOST_IDX, /* To access Host chip GPIO's for GPIO_MULTI_INFO or GPIO_MULTI_MODE */ - MAX_IDX -}; - -struct bcm_gpio_multi_info { - unsigned int uiGPIOCommand; /* 1 for set and 0 for get */ - unsigned int uiGPIOMask; /* set the corresponding bit to 1 to access GPIO */ - unsigned int uiGPIOValue; /* 0 or 1; value to be set when command is 1. */ -} __packed; - -struct bcm_gpio_multi_mode { - unsigned int uiGPIOMode; /* 1 for OUT mode, 0 for IN mode */ - unsigned int uiGPIOMask; /* GPIO mask to set mode */ -} __packed; - -#endif diff --git a/drivers/staging/bcm/Kconfig b/drivers/staging/bcm/Kconfig deleted file mode 100644 index 8acf4b24a7c9..000000000000 --- a/drivers/staging/bcm/Kconfig +++ /dev/null @@ -1,6 +0,0 @@ -config BCM_WIMAX - tristate "Beceem BCS200/BCS220-3 and BCSM250 wimax support" - depends on USB && NET - help - This is an experimental driver for the Beceem WIMAX chipset used - by Sprint 4G. diff --git a/drivers/staging/bcm/LeakyBucket.c b/drivers/staging/bcm/LeakyBucket.c deleted file mode 100644 index 86b79d6438d4..000000000000 --- a/drivers/staging/bcm/LeakyBucket.c +++ /dev/null @@ -1,363 +0,0 @@ -/********************************************************************** -* LEAKYBUCKET.C -* This file contains the routines related to Leaky Bucket Algorithm. -***********************************************************************/ -#include "headers.h" - -/** - * UpdateTokenCount() - Calculates the token count for each channel - * and updates the same in Adapter structure - * @Adapter: Pointer to the Adapter structure. 
- * - * Return: None - */ -static VOID UpdateTokenCount(register struct bcm_mini_adapter *Adapter) -{ - ULONG liCurrentTime; - INT i = 0; - struct timeval tv; - struct bcm_packet_info *curr_pi; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, - "=====>\n"); - if (NULL == Adapter) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, - DBG_LVL_ALL, "Adapter found NULL!\n"); - return; - } - - do_gettimeofday(&tv); - for (i = 0; i < NO_OF_QUEUES; i++) { - curr_pi = &Adapter->PackInfo[i]; - - if (TRUE == curr_pi->bValid && (1 == curr_pi->ucDirection)) { - liCurrentTime = ((tv.tv_sec - - curr_pi->stLastUpdateTokenAt.tv_sec)*1000 + - (tv.tv_usec - curr_pi->stLastUpdateTokenAt.tv_usec) / - 1000); - if (0 != liCurrentTime) { - curr_pi->uiCurrentTokenCount += (ULONG) - ((curr_pi->uiMaxAllowedRate) * - ((ULONG)((liCurrentTime)))/1000); - memcpy(&curr_pi->stLastUpdateTokenAt, &tv, - sizeof(struct timeval)); - curr_pi->liLastUpdateTokenAt = liCurrentTime; - if (curr_pi->uiCurrentTokenCount >= - curr_pi->uiMaxBucketSize) { - curr_pi->uiCurrentTokenCount = - curr_pi->uiMaxBucketSize; - } - } - } - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, - "<=====\n"); -} - - -/** - * IsPacketAllowedForFlow() - This function checks whether the given - * packet from the specified queue can be allowed for transmission by - * checking the token count. - * @Adapter: Pointer to the Adpater structure. - * @iQIndex: The queue Identifier. - * @ulPacketLength: Number of bytes to be transmitted. - * - * Returns: The number of bytes allowed for transmission. - */ -static ULONG GetSFTokenCount(struct bcm_mini_adapter *Adapter, struct bcm_packet_info *psSF) -{ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, - "IsPacketAllowedForFlow ===>"); - - /* Validate the parameters */ - if (NULL == Adapter || (psSF < Adapter->PackInfo && - (uintptr_t)psSF > (uintptr_t) &Adapter->PackInfo[HiPriority])) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, - "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n", - Adapter, (psSF-Adapter->PackInfo)); - return 0; - } - - if (false != psSF->bValid && psSF->ucDirection) { - if (0 != psSF->uiCurrentTokenCount) { - return psSF->uiCurrentTokenCount; - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, - DBG_LVL_ALL, - "Not enough tokens in queue %zd Available %u\n", - psSF-Adapter->PackInfo, psSF->uiCurrentTokenCount); - psSF->uiPendedLast = 1; - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, - "IPAFF: Queue %zd not valid\n", - psSF-Adapter->PackInfo); - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, - "IsPacketAllowedForFlow <==="); - return 0; -} - -/** -@ingroup tx_functions -This function despatches packet from the specified queue. -@return Zero(success) or Negative value(failure) -*/ -static INT SendPacketFromQueue(struct bcm_mini_adapter *Adapter,/**"); - if (!Adapter || !Packet || !psSF) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, - "Got NULL Adapter or Packet"); - return -EINVAL; - } - - if (psSF->liDrainCalculated == 0) - psSF->liDrainCalculated = jiffies; - /* send the packet to the fifo.. 
*/ - PktLen = Packet->len; - Status = SetupNextSend(Adapter, Packet, psSF->usVCID_Value); - if (Status == 0) { - for (uiIndex = 0; uiIndex < MIBS_MAX_HIST_ENTRIES; uiIndex++) { - if ((PktLen <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) && - (PktLen > MIBS_PKTSIZEHIST_RANGE*(uiIndex))) - Adapter->aTxPktSizeHist[uiIndex]++; - } - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, - "<====="); - return Status; -} - -static void get_data_packet(struct bcm_mini_adapter *ad, - struct bcm_packet_info *ps_sf) -{ - int packet_len; - struct sk_buff *qpacket; - - if (!ps_sf->ucDirection) - return; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "UpdateTokenCount "); - if (ad->IdleMode || ad->bPreparingForLowPowerMode) - return; /* in idle mode */ - - /* Check for Free Descriptors */ - if (atomic_read(&ad->CurrNumFreeTxDesc) <= - MINIMUM_PENDING_DESCRIPTORS) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - " No Free Tx Descriptor(%d) is available for Data pkt..", - atomic_read(&ad->CurrNumFreeTxDesc)); - return; - } - - spin_lock_bh(&ps_sf->SFQueueLock); - qpacket = ps_sf->FirstTxQueue; - - if (qpacket) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "Dequeuing Data Packet"); - - if (ps_sf->bEthCSSupport) - packet_len = qpacket->len; - else - packet_len = qpacket->len - ETH_HLEN; - - packet_len <<= 3; - if (packet_len <= GetSFTokenCount(ad, ps_sf)) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, "Allowed bytes %d", - (packet_len >> 3)); - - DEQUEUEPACKET(ps_sf->FirstTxQueue, ps_sf->LastTxQueue); - ps_sf->uiCurrentBytesOnHost -= (qpacket->len); - ps_sf->uiCurrentPacketsOnHost--; - atomic_dec(&ad->TotalPacketCount); - spin_unlock_bh(&ps_sf->SFQueueLock); - - SendPacketFromQueue(ad, ps_sf, qpacket); - ps_sf->uiPendedLast = false; - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, "For Queue: %zd\n", - ps_sf - ad->PackInfo); - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, - "\nAvailable Tokens = %d required = %d\n", - ps_sf->uiCurrentTokenCount, - packet_len); - /* - this part indicates that because of - non-availability of the tokens - pkt has not been send out hence setting the - pending flag indicating the host to send it out - first next iteration. 
- */ - ps_sf->uiPendedLast = TRUE; - spin_unlock_bh(&ps_sf->SFQueueLock); - } - } else { - spin_unlock_bh(&ps_sf->SFQueueLock); - } -} - -static void send_control_packet(struct bcm_mini_adapter *ad, - struct bcm_packet_info *ps_sf) -{ - char *ctrl_packet = NULL; - INT status = 0; - - if ((atomic_read(&ad->CurrNumFreeTxDesc) > 0) && - (atomic_read(&ad->index_rd_txcntrlpkt) != - atomic_read(&ad->index_wr_txcntrlpkt))) { - ctrl_packet = ad->txctlpacket - [(atomic_read(&ad->index_rd_txcntrlpkt)%MAX_CNTRL_PKTS)]; - if (ctrl_packet) { - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, - "Sending Control packet"); - status = SendControlPacket(ad, ctrl_packet); - if (STATUS_SUCCESS == status) { - spin_lock_bh(&ps_sf->SFQueueLock); - ps_sf->NumOfPacketsSent++; - ps_sf->uiSentBytes += ((struct bcm_leader *)ctrl_packet)->PLength; - ps_sf->uiSentPackets++; - atomic_dec(&ad->TotalPacketCount); - ps_sf->uiCurrentBytesOnHost -= ((struct bcm_leader *)ctrl_packet)->PLength; - ps_sf->uiCurrentPacketsOnHost--; - atomic_inc(&ad->index_rd_txcntrlpkt); - spin_unlock_bh(&ps_sf->SFQueueLock); - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, - "SendControlPacket Failed\n"); - } - } else { - BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, - " Control Pkt is not available, Indexing is wrong...."); - } - } -} - -/** - * CheckAndSendPacketFromIndex() - This function dequeues the - * data/control packet from the specified queue for transmission. - * @Adapter: Pointer to the driver control structure. - * @iQIndex: The queue Identifier. - * - * Returns: None. - */ -static VOID CheckAndSendPacketFromIndex(struct bcm_mini_adapter *Adapter, - struct bcm_packet_info *psSF) -{ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "%zd ====>", (psSF-Adapter->PackInfo)); - if ((psSF != &Adapter->PackInfo[HiPriority]) && - Adapter->LinkUpStatus && - atomic_read(&psSF->uiPerSFTxResourceCount)) { /* Get data packet */ - - get_data_packet(Adapter, psSF); - } else { - send_control_packet(Adapter, psSF); - } -} - - -/** - * transmit_packets() - This function transmits the packets from - * different queues, if free descriptors are available on target. - * @Adapter: Pointer to the Adapter structure. - * - * Returns: None. 
- */ -VOID transmit_packets(struct bcm_mini_adapter *Adapter) -{ - UINT uiPrevTotalCount = 0; - int iIndex = 0; - - bool exit_flag = TRUE; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "=====>"); - - if (NULL == Adapter) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "Got NULL Adapter"); - return; - } - if (Adapter->device_removed == TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "Device removed"); - return; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "\nUpdateTokenCount ====>\n"); - - UpdateTokenCount(Adapter); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "\nPruneQueueAllSF ====>\n"); - - PruneQueueAllSF(Adapter); - - uiPrevTotalCount = atomic_read(&Adapter->TotalPacketCount); - - for (iIndex = HiPriority; iIndex >= 0; iIndex--) { - if (!uiPrevTotalCount || (TRUE == Adapter->device_removed)) - break; - - if (Adapter->PackInfo[iIndex].bValid && - Adapter->PackInfo[iIndex].uiPendedLast && - Adapter->PackInfo[iIndex].uiCurrentBytesOnHost) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, - "Calling CheckAndSendPacketFromIndex.."); - CheckAndSendPacketFromIndex(Adapter, - &Adapter->PackInfo[iIndex]); - uiPrevTotalCount--; - } - } - - while (uiPrevTotalCount > 0 && !Adapter->device_removed) { - exit_flag = TRUE; - /* second iteration to parse non-pending queues */ - for (iIndex = HiPriority; iIndex >= 0; iIndex--) { - if (!uiPrevTotalCount || - (TRUE == Adapter->device_removed)) - break; - - if (Adapter->PackInfo[iIndex].bValid && - Adapter->PackInfo[iIndex].uiCurrentBytesOnHost && - !Adapter->PackInfo[iIndex].uiPendedLast) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, - TX_PACKETS, DBG_LVL_ALL, - "Calling CheckAndSendPacketFromIndex.."); - CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[iIndex]); - uiPrevTotalCount--; - exit_flag = false; - } - } - - if (Adapter->IdleMode || Adapter->bPreparingForLowPowerMode) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, "In Idle Mode\n"); - break; - } - if (exit_flag == TRUE) - break; - } /* end of inner while loop */ - - update_per_cid_rx(Adapter); - Adapter->txtransmit_running = 0; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "<======"); -} diff --git a/drivers/staging/bcm/Macros.h b/drivers/staging/bcm/Macros.h deleted file mode 100644 index dc01e3016d4f..000000000000 --- a/drivers/staging/bcm/Macros.h +++ /dev/null @@ -1,352 +0,0 @@ -/************************************* -* Macros.h -**************************************/ -#ifndef __MACROS_H__ -#define __MACROS_H__ - -#define TX_TIMER_PERIOD 10 /*10 msec*/ -#define MAX_CLASSIFIERS 100 -#define MAX_TARGET_DSX_BUFFERS 24 - -#define MAX_CNTRL_PKTS 100 -#define MAX_DATA_PKTS 200 -#define MAX_ETH_SIZE 1536 -#define MAX_CNTL_PKT_SIZE 2048 - -#define MTU_SIZE 1400 -#define TX_QLEN 5 - -#define MAC_ADDR_REGISTER 0xbf60d000 - - -/* Quality of Service */ -#define NO_OF_QUEUES 17 -#define HiPriority (NO_OF_QUEUES-1) -#define LowPriority 0 -#define BE 2 -#define rtPS 4 -#define ERTPS 5 -#define UGS 6 - -#define BE_BUCKET_SIZE (1024*1024*100) /* 32kb */ -#define rtPS_BUCKET_SIZE (1024*1024*100) /* 8kb */ -#define MAX_ALLOWED_RATE (1024*1024*100) -#define TX_PACKET_THRESHOLD 10 -#define XSECONDS (1*HZ) -#define DSC_ACTIVATE_REQUEST 248 -#define QUEUE_DEPTH_OFFSET 0x1fc01000 -#define MAX_DEVICE_DESC_SIZE 2040 -#define MAX_CTRL_QUEUE_LEN 100 -#define MAX_APP_QUEUE_LEN 200 -#define MAX_LATENCY_ALLOWED 0xFFFFFFFF -#define 
DEFAULT_UG_INTERVAL 250 -#define DEFAULT_UGI_FACTOR 4 - -#define DEFAULT_PERSFCOUNT 60 -#define MAX_CONNECTIONS 10 -#define MAX_CLASS_NAME_LENGTH 32 - -#define ETH_LENGTH_OF_ADDRESS 6 -#define MAX_MULTICAST_ADDRESSES 32 -#define IP_LENGTH_OF_ADDRESS 4 - -#define IP_PACKET_ONLY_MODE 0 -#define ETH_PACKET_TUNNELING_MODE 1 - -/* Link Request */ -#define SET_MAC_ADDRESS_REQUEST 0 -#define SYNC_UP_REQUEST 1 -#define SYNCED_UP 2 -#define LINK_UP_REQUEST 3 -#define LINK_CONNECTED 4 -#define SYNC_UP_NOTIFICATION 2 -#define LINK_UP_NOTIFICATION 4 - - -#define LINK_NET_ENTRY 0x0002 -#define HMC_STATUS 0x0004 -#define LINK_UP_CONTROL_REQ 0x83 - -#define STATS_POINTER_REQ_STATUS 0x86 -#define NETWORK_ENTRY_REQ_PAYLOAD 198 -#define LINK_DOWN_REQ_PAYLOAD 226 -#define SYNC_UP_REQ_PAYLOAD 228 -#define STATISTICS_POINTER_REQ 237 -#define LINK_UP_REQ_PAYLOAD 245 -#define LINK_UP_ACK 246 - -#define STATS_MSG_SIZE 4 -#define INDEX_TO_DATA 4 - -#define GO_TO_IDLE_MODE_PAYLOAD 210 -#define COME_UP_FROM_IDLE_MODE_PAYLOAD 211 -#define IDLE_MODE_SF_UPDATE_MSG 187 - -#define SKB_RESERVE_ETHERNET_HEADER 16 -#define SKB_RESERVE_PHS_BYTES 32 - -#define IP_PACKET_ONLY_MODE 0 -#define ETH_PACKET_TUNNELING_MODE 1 - -#define ETH_CS_802_3 1 -#define ETH_CS_802_1Q_VLAN 3 -#define IPV4_CS 1 -#define IPV6_CS 2 -#define ETH_CS_MASK 0x3f - -/** \brief Validity bit maps for TLVs in packet classification rule */ - -#define PKT_CLASSIFICATION_USER_PRIORITY_VALID 0 -#define PKT_CLASSIFICATION_VLANID_VALID 1 - -#ifndef MIN -#define MIN(_a, _b) ((_a) < (_b) ? (_a) : (_b)) -#endif - - -/*Leader related terms */ -#define LEADER_STATUS 0x00 -#define LEADER_STATUS_TCP_ACK 0x1 -#define LEADER_SIZE sizeof(struct bcm_leader) -#define MAC_ADDR_REQ_SIZE sizeof(struct bcm_packettosend) -#define SS_INFO_REQ_SIZE sizeof(struct bcm_packettosend) -#define CM_REQUEST_SIZE (LEADER_SIZE + sizeof(stLocalSFChangeRequest)) -#define IDLE_REQ_SIZE sizeof(struct bcm_packettosend) - - -#define MAX_TRANSFER_CTRL_BYTE_USB (2*1024) - -#define GET_MAILBOX1_REG_REQUEST 0x87 -#define GET_MAILBOX1_REG_RESPONSE 0x67 -#define VCID_CONTROL_PACKET 0x00 - -#define TRANSMIT_NETWORK_DATA 0x00 -#define RECEIVED_NETWORK_DATA 0x20 - -#define CM_RESPONSES 0xA0 -#define STATUS_RSP 0xA1 -#define LINK_CONTROL_RESP 0xA2 -#define IDLE_MODE_STATUS 0xA3 -#define STATS_POINTER_RESP 0xA6 -#define MGMT_MSG_INFO_SW_STATUS 0xA7 -#define AUTH_SS_HOST_MSG 0xA8 - -#define CM_DSA_ACK_PAYLOAD 247 -#define CM_DSC_ACK_PAYLOAD 248 -#define CM_DSD_ACK_PAYLOAD 249 -#define CM_DSDEACTVATE 250 -#define TOTAL_MASKED_ADDRESS_IN_BYTES 32 - -#define MAC_REQ 0 -#define LINK_RESP 1 -#define RSSI_INDICATION 2 - -#define SS_INFO 4 -#define STATISTICS_INFO 5 -#define CM_INDICATION 6 -#define PARAM_RESP 7 -#define BUFFER_1K 1024 -#define BUFFER_2K (BUFFER_1K*2) -#define BUFFER_4K (BUFFER_2K*2) -#define BUFFER_8K (BUFFER_4K*2) -#define BUFFER_16K (BUFFER_8K*2) -#define DOWNLINK_DIR 0 -#define UPLINK_DIR 1 - -#define BCM_SIGNATURE "BECEEM" - - -#define GPIO_OUTPUT_REGISTER 0x0F00003C -#define BCM_GPIO_OUTPUT_SET_REG 0x0F000040 -#define BCM_GPIO_OUTPUT_CLR_REG 0x0F000044 -#define GPIO_MODE_REGISTER 0x0F000034 -#define GPIO_PIN_STATE_REGISTER 0x0F000038 - -struct bcm_link_state { - unsigned char ucLinkStatus; - unsigned char bIdleMode; - unsigned char bShutdownMode; -}; - -enum enLinkStatus { - WAIT_FOR_SYNC = 1, - PHY_SYNC_ACHIVED = 2, - LINKUP_IN_PROGRESS = 3, - LINKUP_DONE = 4, - DREG_RECEIVED = 5, - LINK_STATUS_RESET_RECEIVED = 6, - PERIODIC_WAKE_UP_NOTIFICATION_FRM_FW = 7, - LINK_SHUTDOWN_REQ_FROM_FIRMWARE 
= 8, - COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW = 9 -}; - -enum bcm_phs_dsc_action { - eAddPHSRule = 0, - eSetPHSRule, - eDeletePHSRule, - eDeleteAllPHSRules -}; - -#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ 0x89 /* Host to Mac */ -#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP 0xA9 /* Mac to Host */ -#define MASK_DISABLE_HEADER_SUPPRESSION 0x10 /* 0b000010000 */ -#define MINIMUM_PENDING_DESCRIPTORS 5 - -#define SHUTDOWN_HOSTINITIATED_REQUESTPAYLOAD 0xCC -#define SHUTDOWN_ACK_FROM_DRIVER 0x1 -#define SHUTDOWN_NACK_FROM_DRIVER 0x2 - -#define LINK_SYNC_UP_SUBTYPE 0x0001 -#define LINK_SYNC_DOWN_SUBTYPE 0x0001 - - - -#define CONT_MODE 1 -#define SINGLE_DESCRIPTOR 1 - - -#define DESCRIPTOR_LENGTH 0x30 -#define FIRMWARE_DESCS_ADDRESS 0x1F100000 - - -#define CLOCK_RESET_CNTRL_REG_1 0x0F00000C -#define CLOCK_RESET_CNTRL_REG_2 0x0F000840 - - - -#define TX_DESCRIPTOR_HEAD_REGISTER 0x0F010034 -#define RX_DESCRIPTOR_HEAD_REGISTER 0x0F010094 - -#define STATISTICS_BEGIN_ADDR 0xbf60f02c - -#define MAX_PENDING_CTRL_PACKET (MAX_CTRL_QUEUE_LEN-10) - -#define WIMAX_MAX_MTU (MTU_SIZE + ETH_HLEN) -#define AUTO_LINKUP_ENABLE 0x2 -#define AUTO_SYNC_DISABLE 0x1 -#define AUTO_FIRM_DOWNLOAD 0x1 -#define SETTLE_DOWN_TIME 50 - -#define HOST_BUS_SUSPEND_BIT 16 - -#define IDLE_MESSAGE 0x81 - -#define MIPS_CLOCK_133MHz 1 - -#define TARGET_CAN_GO_TO_IDLE_MODE 2 -#define TARGET_CAN_NOT_GO_TO_IDLE_MODE 3 -#define IDLE_MODE_PAYLOAD_LENGTH 8 - -#define IP_HEADER(Buffer) ((IPHeaderFormat *)(Buffer)) -#define IPV4 4 -#define IP_VERSION(byte) (((byte&0xF0)>>4)) - -#define SET_MAC_ADDRESS 193 -#define SET_MAC_ADDRESS_RESPONSE 236 - -#define IDLE_MODE_WAKEUP_PATTERN 0xd0ea1d1e -#define IDLE_MODE_WAKEUP_NOTIFIER_ADDRESS 0x1FC02FA8 -#define IDLE_MODE_MAX_RETRY_COUNT 1000 - -#define CONFIG_BEGIN_ADDR 0xBF60B000 - -#define FIRMWARE_BEGIN_ADDR 0xBFC00000 - -#define INVALID_QUEUE_INDEX NO_OF_QUEUES - -#define INVALID_PID ((pid_t)-1) -#define DDR_80_MHZ 0 -#define DDR_100_MHZ 1 -#define DDR_120_MHZ 2 /* Additional Frequency for T3LP */ -#define DDR_133_MHZ 3 -#define DDR_140_MHZ 4 /* Not Used (Reserved for future) */ -#define DDR_160_MHZ 5 /* Additional Frequency for T3LP */ -#define DDR_180_MHZ 6 /* Not Used (Reserved for future) */ -#define DDR_200_MHZ 7 /* Not Used (Reserved for future) */ - -#define MIPS_200_MHZ 0 -#define MIPS_160_MHZ 1 - -#define PLL_800_MHZ 0 -#define PLL_266_MHZ 1 - -#define DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING 0 -#define DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING 1 -#define DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN 2 -#define DEVICE_POWERSAVE_MODE_AS_RESERVED 3 -#define DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE 4 - - -#define EEPROM_REJECT_REG_1 0x0f003018 -#define EEPROM_REJECT_REG_2 0x0f00301c -#define EEPROM_REJECT_REG_3 0x0f003008 -#define EEPROM_REJECT_REG_4 0x0f003020 -#define EEPROM_REJECT_MASK 0x0fffffff -#define VSG_MODE 0x3 - -/* Idle Mode Related Registers */ -#define DEBUG_INTERRUPT_GENERATOR_REGISTOR 0x0F00007C -#define SW_ABORT_IDLEMODE_LOC 0x0FF01FFC - -#define SW_ABORT_IDLEMODE_PATTERN 0xd0ea1d1e -#define DEVICE_INT_OUT_EP_REG0 0x0F011870 -#define DEVICE_INT_OUT_EP_REG1 0x0F011874 - -#define BIN_FILE "/lib/firmware/macxvi200.bin" -#define CFG_FILE "/lib/firmware/macxvi.cfg" -#define SF_MAX_ALLOWED_PACKETS_TO_BACKUP 128 -#define MIN_VAL(x, y) ((x) < (y) ? 
(x) : (y)) -#define MAC_ADDRESS_SIZE 6 -#define EEPROM_COMMAND_Q_REG 0x0F003018 -#define EEPROM_READ_DATA_Q_REG 0x0F003020 -#define CHIP_ID_REG 0x0F000000 -#define GPIO_MODE_REG 0x0F000034 -#define GPIO_OUTPUT_REG 0x0F00003C -#define WIMAX_MAX_ALLOWED_RATE (1024*1024*50) - -#define T3 0xbece0300 -#define TARGET_SFID_TXDESC_MAP_LOC 0xBFFFF400 - -#define RWM_READ 0 -#define RWM_WRITE 1 - -#define T3LPB 0xbece3300 -#define BCS220_2 0xbece3311 -#define BCS220_2BC 0xBECE3310 -#define BCS250_BC 0xbece3301 -#define BCS220_3 0xbece3321 - - -#define HPM_CONFIG_LDO145 0x0F000D54 -#define HPM_CONFIG_MSW 0x0F000D58 - -#define T3B 0xbece0310 -enum bcm_nvm_type { - NVM_AUTODETECT = 0, - NVM_EEPROM, - NVM_FLASH, - NVM_UNKNOWN -}; - -enum bcm_pmu_modes { - HYBRID_MODE_7C = 0, - INTERNAL_MODE_6 = 1, - HYBRID_MODE_6 = 2 -}; - -#define MAX_RDM_WRM_RETIRES 1 - -enum eAbortPattern { - ABORT_SHUTDOWN_MODE = 1, - ABORT_IDLE_REG = 1, - ABORT_IDLE_MODE = 2, - ABORT_IDLE_SYNCDOWN = 3 -}; - - -/* Offsets used by driver in skb cb variable */ -#define SKB_CB_CLASSIFICATION_OFFSET 0 -#define SKB_CB_LATENCY_OFFSET 1 -#define SKB_CB_TCPACK_OFFSET 2 - -#endif /* __MACROS_H__ */ diff --git a/drivers/staging/bcm/Makefile b/drivers/staging/bcm/Makefile deleted file mode 100644 index 652b7f87737c..000000000000 --- a/drivers/staging/bcm/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# -# Makefile for Beceem USB Wimax card -# - -obj-$(CONFIG_BCM_WIMAX) += bcm_wimax.o - -bcm_wimax-y := InterfaceDld.o InterfaceIdleMode.o InterfaceInit.o InterfaceRx.o \ - InterfaceIsr.o InterfaceMisc.o InterfaceTx.o \ - CmHost.o IPv6Protocol.o Qos.o Transmit.o\ - Bcmnet.o DDRInit.o HandleControlPacket.o\ - LeakyBucket.o Misc.o sort.o Bcmchar.o hostmibs.o PHSModule.o\ - led_control.o nvm.o vendorspecificextn.o diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c deleted file mode 100644 index 883f7394dee6..000000000000 --- a/drivers/staging/bcm/Misc.c +++ /dev/null @@ -1,1587 +0,0 @@ -#include "headers.h" - -static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, unsigned int loc); -static void doPowerAutoCorrection(struct bcm_mini_adapter *psAdapter); -static void HandleShutDownModeRequest(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer); -static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter); -static void beceem_protocol_reset(struct bcm_mini_adapter *Adapter); - -static void default_wimax_protocol_initialize(struct bcm_mini_adapter *Adapter) -{ - unsigned int uiLoopIndex; - - for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES-1; uiLoopIndex++) { - Adapter->PackInfo[uiLoopIndex].uiThreshold = TX_PACKET_THRESHOLD; - Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate = MAX_ALLOWED_RATE; - Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize = 20*1024*1024; - } - - Adapter->BEBucketSize = BE_BUCKET_SIZE; - Adapter->rtPSBucketSize = rtPS_BUCKET_SIZE; - Adapter->LinkStatus = SYNC_UP_REQUEST; - Adapter->TransferMode = IP_PACKET_ONLY_MODE; - Adapter->usBestEffortQueueIndex = -1; -} - -int InitAdapter(struct bcm_mini_adapter *psAdapter) -{ - int i = 0; - int Status = STATUS_SUCCESS; - - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Initialising Adapter = %p", psAdapter); - - if (psAdapter == NULL) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter is NULL"); - return -EINVAL; - } - - sema_init(&psAdapter->NVMRdmWrmLock, 1); - sema_init(&psAdapter->rdmwrmsync, 1); - spin_lock_init(&psAdapter->control_queue_lock); - spin_lock_init(&psAdapter->txtransmitlock); - 
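
InitAdapter() here initialises every semaphore, spinlock and wait queue before any worker thread can touch the adapter. As a minimal sketch of that same initialisation pattern (separate from the patch itself; the bcm_demo_ctx structure and its field names are hypothetical and do not exist in the driver):

	#include <linux/semaphore.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	struct bcm_demo_ctx {			/* hypothetical, for illustration only */
		struct semaphore nvm_lock;	/* counted to 1, used like NVMRdmWrmLock */
		spinlock_t queue_lock;		/* protects a per-queue packet list */
		wait_queue_head_t rx_wait;	/* control-packet thread sleeps here */
	};

	static void bcm_demo_ctx_init(struct bcm_demo_ctx *ctx)
	{
		sema_init(&ctx->nvm_lock, 1);		/* initial count 1 => mutex-like use */
		spin_lock_init(&ctx->queue_lock);
		init_waitqueue_head(&ctx->rx_wait);	/* before any wake_up()/wait_event() */
	}
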
sema_init(&psAdapter->RxAppControlQueuelock, 1); - sema_init(&psAdapter->fw_download_sema, 1); - sema_init(&psAdapter->LowPowerModeSync, 1); - - for (i = 0; i < NO_OF_QUEUES; i++) - spin_lock_init(&psAdapter->PackInfo[i].SFQueueLock); - i = 0; - - init_waitqueue_head(&psAdapter->process_rx_cntrlpkt); - init_waitqueue_head(&psAdapter->tx_packet_wait_queue); - init_waitqueue_head(&psAdapter->process_read_wait_queue); - init_waitqueue_head(&psAdapter->ioctl_fw_dnld_wait_queue); - init_waitqueue_head(&psAdapter->lowpower_mode_wait_queue); - psAdapter->waiting_to_fw_download_done = TRUE; - psAdapter->fw_download_done = false; - - default_wimax_protocol_initialize(psAdapter); - for (i = 0; i < MAX_CNTRL_PKTS; i++) { - psAdapter->txctlpacket[i] = kmalloc(MAX_CNTL_PKT_SIZE, GFP_KERNEL); - if (!psAdapter->txctlpacket[i]) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No More Cntl pkts got, max got is %d", i); - return -ENOMEM; - } - } - - if (AllocAdapterDsxBuffer(psAdapter)) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to allocate DSX buffers"); - return -EINVAL; - } - - /* Initialize PHS interface */ - if (phs_init(&psAdapter->stBCMPhsContext, psAdapter) != 0) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%s:%d:Error PHS Init Failed=====>\n", __FILE__, __func__, __LINE__); - return -ENOMEM; - } - - Status = BcmAllocFlashCSStructure(psAdapter); - if (Status) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Memory Allocation for Flash structure failed"); - return Status; - } - - Status = vendorextnInit(psAdapter); - - if (STATUS_SUCCESS != Status) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Vendor Init Failed"); - return Status; - } - - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter initialised"); - - return STATUS_SUCCESS; -} - -void AdapterFree(struct bcm_mini_adapter *Adapter) -{ - int count; - - beceem_protocol_reset(Adapter); - vendorextnExit(Adapter); - - if (Adapter->control_packet_handler && !IS_ERR(Adapter->control_packet_handler)) - kthread_stop(Adapter->control_packet_handler); - - if (Adapter->transmit_packet_thread && !IS_ERR(Adapter->transmit_packet_thread)) - kthread_stop(Adapter->transmit_packet_thread); - - wake_up(&Adapter->process_read_wait_queue); - - if (Adapter->LEDInfo.led_thread_running & (BCM_LED_THREAD_RUNNING_ACTIVELY | BCM_LED_THREAD_RUNNING_INACTIVELY)) - kthread_stop(Adapter->LEDInfo.led_cntrl_threadid); - - unregister_networkdev(Adapter); - - /* FIXME: use proper wait_event and refcounting */ - while (atomic_read(&Adapter->ApplicationRunning)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Waiting for Application to close.. 
%d\n", atomic_read(&Adapter->ApplicationRunning)); - msleep(100); - } - unregister_control_device_interface(Adapter); - kfree(Adapter->pstargetparams); - - for (count = 0; count < MAX_CNTRL_PKTS; count++) - kfree(Adapter->txctlpacket[count]); - - FreeAdapterDsxBuffer(Adapter); - kfree(Adapter->pvInterfaceAdapter); - - /* Free the PHS Interface */ - PhsCleanup(&Adapter->stBCMPhsContext); - - BcmDeAllocFlashCSStructure(Adapter); - - free_netdev(Adapter->dev); -} - -static int create_worker_threads(struct bcm_mini_adapter *psAdapter) -{ - /* Rx Control Packets Processing */ - psAdapter->control_packet_handler = kthread_run((int (*)(void *)) - control_packet_handler, psAdapter, "%s-rx", DRV_NAME); - if (IS_ERR(psAdapter->control_packet_handler)) { - pr_notice(DRV_NAME ": could not create control thread\n"); - return PTR_ERR(psAdapter->control_packet_handler); - } - - /* Tx Thread */ - psAdapter->transmit_packet_thread = kthread_run((int (*)(void *)) - tx_pkt_handler, psAdapter, "%s-tx", DRV_NAME); - if (IS_ERR(psAdapter->transmit_packet_thread)) { - pr_notice(DRV_NAME ": could not creat transmit thread\n"); - kthread_stop(psAdapter->control_packet_handler); - return PTR_ERR(psAdapter->transmit_packet_thread); - } - return 0; -} - -static struct file *open_firmware_file(struct bcm_mini_adapter *Adapter, const char *path) -{ - struct file *flp = filp_open(path, O_RDONLY, S_IRWXU); - - if (IS_ERR(flp)) { - pr_err(DRV_NAME "Unable To Open File %s, err %ld", path, PTR_ERR(flp)); - flp = NULL; - } - - if (Adapter->device_removed) - flp = NULL; - - return flp; -} - -/* Arguments: - * Logical Adapter - * Path to image file - * Download Address on the chip - */ -static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, unsigned int loc) -{ - int errorno = 0; - struct file *flp = NULL; - struct timeval tv = {0}; - - flp = open_firmware_file(Adapter, path); - if (!flp) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path); - return -ENOENT; - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)file_inode(flp)->i_size, loc); - do_gettimeofday(&tv); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "download start %lx", ((tv.tv_sec * 1000) + (tv.tv_usec / 1000))); - if (Adapter->bcm_file_download(Adapter->pvInterfaceAdapter, flp, loc)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to download the firmware with error %x!!!", -EIO); - errorno = -EIO; - goto exit_download; - } - vfs_llseek(flp, 0, 0); - if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!"); - errorno = -EIO; - goto exit_download; - } - -exit_download: - filp_close(flp, NULL); - return errorno; -} - -/** - * @ingroup ctrl_pkt_functions - * This function copies the contents of given buffer - * to the control packet and queues it for transmission. - * @note Do not acquire the spinlock, as it it already acquired. - * @return SUCCESS/FAILURE. 
- * Arguments: - * Logical Adapter - * Control Packet Buffer - */ -int CopyBufferToControlPacket(struct bcm_mini_adapter *Adapter, void *ioBuffer) -{ - struct bcm_leader *pLeader = NULL; - int Status = 0; - unsigned char *ctrl_buff; - unsigned int pktlen = 0; - struct bcm_link_request *pLinkReq = NULL; - PUCHAR pucAddIndication = NULL; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "======>"); - if (!ioBuffer) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Got Null Buffer\n"); - return -EINVAL; - } - - pLinkReq = (struct bcm_link_request *)ioBuffer; - pLeader = (struct bcm_leader *)ioBuffer; /* ioBuffer Contains sw_Status and Payload */ - - if (Adapter->bShutStatus == TRUE && - pLinkReq->szData[0] == LINK_DOWN_REQ_PAYLOAD && - pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE) { - - /* Got sync down in SHUTDOWN..we could not process this. */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC DOWN Request in Shut Down Mode..\n"); - return STATUS_FAILURE; - } - - if ((pLeader->Status == LINK_UP_CONTROL_REQ) && - ((pLinkReq->szData[0] == LINK_UP_REQ_PAYLOAD && - (pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE)) || /* Sync Up Command */ - pLinkReq->szData[0] == NETWORK_ENTRY_REQ_PAYLOAD)) /* Net Entry Command */ { - - if (Adapter->LinkStatus > PHY_SYNC_ACHIVED) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "LinkStatus is Greater than PHY_SYN_ACHIEVED"); - return STATUS_FAILURE; - } - - if (Adapter->bShutStatus == TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC UP IN SHUTDOWN..Device WakeUp\n"); - if (Adapter->bTriedToWakeUpFromlowPowerMode == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Waking up for the First Time..\n"); - Adapter->usIdleModePattern = ABORT_SHUTDOWN_MODE; /* change it to 1 for current support. */ - Adapter->bWakeUpDevice = TRUE; - wake_up(&Adapter->process_rx_cntrlpkt); - Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->bShutStatus, (5 * HZ)); - - if (Status == -ERESTARTSYS) - return Status; - - if (Adapter->bShutStatus) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Shutdown Mode Wake up Failed - No Wake Up Received\n"); - return STATUS_FAILURE; - } - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Wakeup has been tried already...\n"); - } - } - } - - if (Adapter->IdleMode == TRUE) { - /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is in Idle mode ... 
hence\n"); */ - if (pLeader->Status == LINK_UP_CONTROL_REQ || pLeader->Status == 0x80 || - pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ) { - - if ((pLeader->Status == LINK_UP_CONTROL_REQ) && (pLinkReq->szData[0] == LINK_DOWN_REQ_PAYLOAD)) { - if (pLinkReq->szData[1] == LINK_SYNC_DOWN_SUBTYPE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Link Down Sent in Idle Mode\n"); - Adapter->usIdleModePattern = ABORT_IDLE_SYNCDOWN; /* LINK DOWN sent in Idle Mode */ - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "ABORT_IDLE_MODE pattern is being written\n"); - Adapter->usIdleModePattern = ABORT_IDLE_REG; - } - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "ABORT_IDLE_MODE pattern is being written\n"); - Adapter->usIdleModePattern = ABORT_IDLE_MODE; - } - - /*Setting bIdleMode_tx_from_host to TRUE to indicate LED control thread to represent - * the wake up from idlemode is from host - */ - /* Adapter->LEDInfo.bIdleMode_tx_from_host = TRUE; */ - Adapter->bWakeUpDevice = TRUE; - wake_up(&Adapter->process_rx_cntrlpkt); - - /* We should not send DREG message down while in idlemode. */ - if (LINK_DOWN_REQ_PAYLOAD == pLinkReq->szData[0]) - return STATUS_SUCCESS; - - Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->IdleMode, (5 * HZ)); - - if (Status == -ERESTARTSYS) - return Status; - - if (Adapter->IdleMode) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Idle Mode Wake up Failed - No Wake Up Received\n"); - return STATUS_FAILURE; - } - } else { - return STATUS_SUCCESS; - } - } - - /* The Driver has to send control messages with a particular VCID */ - pLeader->Vcid = VCID_CONTROL_PACKET; /* VCID for control packet. */ - - /* Allocate skb for Control Packet */ - pktlen = pLeader->PLength; - ctrl_buff = (char *)Adapter->txctlpacket[atomic_read(&Adapter->index_wr_txcntrlpkt)%MAX_CNTRL_PKTS]; - - if (!ctrl_buff) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "mem allocation Failed"); - return -ENOMEM; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Control packet to be taken =%d and address is =%pincoming address is =%p and packet len=%x", - atomic_read(&Adapter->index_wr_txcntrlpkt), ctrl_buff, ioBuffer, pktlen); - - if (pLeader) { - if ((pLeader->Status == 0x80) || - (pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ)) { - /* - * Restructure the DSX message to handle Multiple classifier Support - * Write the Service Flow param Structures directly to the target - * and embed the pointers in the DSX messages sent to target. - */ - /* Lets store the current length of the control packet we are transmitting */ - pucAddIndication = (PUCHAR)ioBuffer + LEADER_SIZE; - pktlen = pLeader->PLength; - Status = StoreCmControlResponseMessage(Adapter, pucAddIndication, &pktlen); - if (Status != 1) { - ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication_alt *)pucAddIndication)->u16TID, false); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, " Error Restoring The DSX Control Packet. 
Dsx Buffers on Target may not be Setup Properly "); - return STATUS_FAILURE; - } - /* - * update the leader to use the new length - * The length of the control packet is length of message being sent + Leader length - */ - pLeader->PLength = pktlen; - } - } - - if (pktlen + LEADER_SIZE > MAX_CNTL_PKT_SIZE) - return -EINVAL; - - memset(ctrl_buff, 0, pktlen+LEADER_SIZE); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Copying the Control Packet Buffer with length=%d\n", pLeader->PLength); - *(struct bcm_leader *)ctrl_buff = *pLeader; - memcpy(ctrl_buff + LEADER_SIZE, ((PUCHAR)ioBuffer + LEADER_SIZE), pLeader->PLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Enqueuing the Control Packet"); - - /* Update the statistics counters */ - spin_lock_bh(&Adapter->PackInfo[HiPriority].SFQueueLock); - Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost += pLeader->PLength; - Adapter->PackInfo[HiPriority].uiCurrentPacketsOnHost++; - atomic_inc(&Adapter->TotalPacketCount); - spin_unlock_bh(&Adapter->PackInfo[HiPriority].SFQueueLock); - Adapter->PackInfo[HiPriority].bValid = TRUE; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "CurrBytesOnHost: %x bValid: %x", - Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost, - Adapter->PackInfo[HiPriority].bValid); - Status = STATUS_SUCCESS; - /*Queue the packet for transmission */ - atomic_inc(&Adapter->index_wr_txcntrlpkt); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Calling transmit_packets"); - atomic_set(&Adapter->TxPktAvail, 1); - wake_up(&Adapter->tx_packet_wait_queue); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "<===="); - return Status; -} - -/****************************************************************** -* Function - LinkMessage() -* -* Description - This function builds the Sync-up and Link-up request -* packet messages depending on the device Link status. -* -* Parameters - Adapter: Pointer to the Adapter structure. -* -* Returns - None. -*******************************************************************/ -void LinkMessage(struct bcm_mini_adapter *Adapter) -{ - struct bcm_link_request *pstLinkRequest = NULL; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "=====>"); - if (Adapter->LinkStatus == SYNC_UP_REQUEST && Adapter->AutoSyncup) { - pstLinkRequest = kzalloc(sizeof(struct bcm_link_request), GFP_ATOMIC); - if (!pstLinkRequest) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!"); - return; - } - /* sync up request... 
*/ - Adapter->LinkStatus = WAIT_FOR_SYNC; /* current link status */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For SyncUp..."); - pstLinkRequest->szData[0] = LINK_UP_REQ_PAYLOAD; - pstLinkRequest->szData[1] = LINK_SYNC_UP_SUBTYPE; - pstLinkRequest->Leader.Status = LINK_UP_CONTROL_REQ; - pstLinkRequest->Leader.PLength = sizeof(ULONG); - Adapter->bSyncUpRequestSent = TRUE; - - } else if (Adapter->LinkStatus == PHY_SYNC_ACHIVED && Adapter->AutoLinkUp) { - pstLinkRequest = kzalloc(sizeof(struct bcm_link_request), GFP_ATOMIC); - if (!pstLinkRequest) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!"); - return; - } - /* LINK_UP_REQUEST */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For LinkUp..."); - pstLinkRequest->szData[0] = LINK_UP_REQ_PAYLOAD; - pstLinkRequest->szData[1] = LINK_NET_ENTRY; - pstLinkRequest->Leader.Status = LINK_UP_CONTROL_REQ; - pstLinkRequest->Leader.PLength = sizeof(ULONG); - } - if (pstLinkRequest) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Calling CopyBufferToControlPacket"); - CopyBufferToControlPacket(Adapter, pstLinkRequest); - kfree(pstLinkRequest); - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "LinkMessage <====="); - return; -} - -/********************************************************************** -* Function - StatisticsResponse() -* -* Description - This function handles the Statistics response packet. -* -* Parameters - Adapter : Pointer to the Adapter structure. -* - pvBuffer: Starting address of Statistic response data. -* -* Returns - None. -************************************************************************/ -void StatisticsResponse(struct bcm_mini_adapter *Adapter, void *pvBuffer) -{ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s====>", __func__); - Adapter->StatisticsPointer = ntohl(*(__be32 *)pvBuffer); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %x", (unsigned int)Adapter->StatisticsPointer); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s <====", __func__); -} - -/********************************************************************** -* Function - LinkControlResponseMessage() -* -* Description - This function handles the Link response packets. -* -* Parameters - Adapter : Pointer to the Adapter structure. -* - pucBuffer: Starting address of Link response data. -* -* Returns - None. 
-***********************************************************************/ -void LinkControlResponseMessage(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer) -{ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "=====>"); - - if (*pucBuffer == LINK_UP_ACK) { - switch (*(pucBuffer+1)) { - case PHY_SYNC_ACHIVED: /* SYNCed UP */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHY_SYNC_ACHIVED"); - - if (Adapter->LinkStatus == LINKUP_DONE) - beceem_protocol_reset(Adapter); - - Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX; - Adapter->LinkStatus = PHY_SYNC_ACHIVED; - - if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { - Adapter->DriverState = NO_NETWORK_ENTRY; - wake_up(&Adapter->LEDInfo.notify_led_event); - } - - LinkMessage(Adapter); - break; - - case LINKUP_DONE: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LINKUP_DONE"); - Adapter->LinkStatus = LINKUP_DONE; - Adapter->bPHSEnabled = *(pucBuffer+3); - Adapter->bETHCSEnabled = *(pucBuffer+4) & ETH_CS_MASK; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHS Support Status Received In LinkUp Ack : %x\n", Adapter->bPHSEnabled); - - if ((false == Adapter->bShutStatus) && (false == Adapter->IdleMode)) { - if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { - Adapter->DriverState = NORMAL_OPERATION; - wake_up(&Adapter->LEDInfo.notify_led_event); - } - } - LinkMessage(Adapter); - break; - - case WAIT_FOR_SYNC: - /* - * Driver to ignore the DREG_RECEIVED - * WiMAX Application should handle this Message - */ - /* Adapter->liTimeSinceLastNetEntry = 0; */ - Adapter->LinkUpStatus = 0; - Adapter->LinkStatus = 0; - Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX; - Adapter->bTriedToWakeUpFromlowPowerMode = false; - Adapter->IdleMode = false; - beceem_protocol_reset(Adapter); - - break; - case LINK_SHUTDOWN_REQ_FROM_FIRMWARE: - case COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW: - { - HandleShutDownModeRequest(Adapter, pucBuffer); - } - break; - default: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "default case:LinkResponse %x", *(pucBuffer + 1)); - break; - } - } else if (SET_MAC_ADDRESS_RESPONSE == *pucBuffer) { - PUCHAR puMacAddr = (pucBuffer + 1); - - Adapter->LinkStatus = SYNC_UP_REQUEST; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "MAC address response, sending SYNC_UP"); - LinkMessage(Adapter); - memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE); - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "%s <=====", __func__); -} - -void SendIdleModeResponse(struct bcm_mini_adapter *Adapter) -{ - int status = 0, NVMAccess = 0, lowPwrAbortMsg = 0; - struct timeval tv; - struct bcm_link_request stIdleResponse = {{0} }; - - memset(&tv, 0, sizeof(tv)); - stIdleResponse.Leader.Status = IDLE_MESSAGE; - stIdleResponse.Leader.PLength = IDLE_MODE_PAYLOAD_LENGTH; - stIdleResponse.szData[0] = GO_TO_IDLE_MODE_PAYLOAD; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, " ============>"); - - /********************************* - *down_trylock - - * if [ semaphore is available ] - * acquire semaphone and return value 0 ; - * else - * return non-zero value ; - * - ***********************************/ - - NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock); - lowPwrAbortMsg = down_trylock(&Adapter->LowPowerModeSync); - - - if ((NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) && - (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) { - - if (!NVMAccess) - up(&Adapter->NVMRdmWrmLock); 
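
The block comment above spells out the down_trylock() semantics the driver relies on: it returns 0 only when the semaphore was actually acquired, and never sleeps. A minimal sketch of that try-acquire-or-NACK pattern (separate from the patch itself; the helper and lock names below are hypothetical stand-ins for NVMRdmWrmLock and LowPowerModeSync):

	#include <linux/semaphore.h>
	#include <linux/types.h>

	/* hypothetical helper: returns true only if both locks were taken */
	static bool demo_try_enter_idle(struct semaphore *nvm_lock,
					struct semaphore *low_pwr_lock)
	{
		int nvm_busy = down_trylock(nvm_lock);		/* 0 on success */
		int pwr_busy = down_trylock(low_pwr_lock);

		if (nvm_busy || pwr_busy) {
			/* NACK path: release only what was actually acquired */
			if (!nvm_busy)
				up(nvm_lock);
			if (!pwr_busy)
				up(low_pwr_lock);
			return false;
		}
		/* both held: caller proceeds and must up() both afterwards */
		return true;
	}
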
- - if (!lowPwrAbortMsg) - up(&Adapter->LowPowerModeSync); - - stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "HOST IS NACKING Idle mode To F/W!!!!!!!!"); - Adapter->bPreparingForLowPowerMode = false; - } else { - stIdleResponse.szData[1] = TARGET_CAN_GO_TO_IDLE_MODE; /* 2; Idle ACK */ - Adapter->StatisticsPointer = 0; - - /* Wait for the LED to TURN OFF before sending ACK response */ - if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { - int iRetVal = 0; - - /* Wake the LED Thread with IDLEMODE_ENTER State */ - Adapter->DriverState = LOWPOWER_MODE_ENTER; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld", jiffies); - wake_up(&Adapter->LEDInfo.notify_led_event); - - /* Wait for 1 SEC for LED to OFF */ - iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000)); - - /* If Timed Out to Sync IDLE MODE Enter, do IDLE mode Exit and Send NACK to device */ - if (iRetVal <= 0) { - stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */ - Adapter->DriverState = NORMAL_OPERATION; - wake_up(&Adapter->LEDInfo.notify_led_event); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "NACKING Idle mode as time out happen from LED side!!!!!!!!"); - } - } - - if (stIdleResponse.szData[1] == TARGET_CAN_GO_TO_IDLE_MODE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "ACKING IDLE MODE !!!!!!!!!"); - down(&Adapter->rdmwrmsync); - Adapter->bPreparingForLowPowerMode = TRUE; - up(&Adapter->rdmwrmsync); - /* Killing all URBS. */ - if (Adapter->bDoSuspend == TRUE) - Bcm_kill_all_URBs((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter)); - } else { - Adapter->bPreparingForLowPowerMode = false; - } - - if (!NVMAccess) - up(&Adapter->NVMRdmWrmLock); - - if (!lowPwrAbortMsg) - up(&Adapter->LowPowerModeSync); - } - - status = CopyBufferToControlPacket(Adapter, &stIdleResponse); - if (status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "fail to send the Idle mode Request\n"); - Adapter->bPreparingForLowPowerMode = false; - StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter)); - } - do_gettimeofday(&tv); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "IdleMode Msg submitter to Q :%ld ms", tv.tv_sec * 1000 + tv.tv_usec / 1000); -} - -/****************************************************************** -* Function - DumpPackInfo() -* -* Description - This function dumps the all Queue(PackInfo[]) details. -* -* Parameters - Adapter: Pointer to the Adapter structure. -* -* Returns - None. 
-*******************************************************************/ -void DumpPackInfo(struct bcm_mini_adapter *Adapter) -{ - unsigned int uiLoopIndex = 0; - unsigned int uiIndex = 0; - unsigned int uiClsfrIndex = 0; - struct bcm_classifier_rule *pstClassifierEntry = NULL; - - for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "*********** Showing Details Of Queue %d***** ******", uiLoopIndex); - if (false == Adapter->PackInfo[uiLoopIndex].bValid) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid is false for %X index\n", uiLoopIndex); - continue; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, " Dumping SF Rule Entry For SFID %lX\n", Adapter->PackInfo[uiLoopIndex].ulSFID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, " ucDirection %X\n", Adapter->PackInfo[uiLoopIndex].ucDirection); - - if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Ipv6 Service Flow\n"); - else - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Ipv4 Service Flow\n"); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SF Traffic Priority %X\n", Adapter->PackInfo[uiLoopIndex].u8TrafficPriority); - - for (uiClsfrIndex = 0; uiClsfrIndex < MAX_CLASSIFIERS; uiClsfrIndex++) { - pstClassifierEntry = &Adapter->astClassifierTable[uiClsfrIndex]; - if (!pstClassifierEntry->bUsed) - continue; - - if (pstClassifierEntry->ulSFID != Adapter->PackInfo[uiLoopIndex].ulSFID) - continue; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X Classifier Rule ID : %X\n", uiClsfrIndex, pstClassifierEntry->uiClassifierRuleIndex); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X usVCID_Value : %X\n", uiClsfrIndex, pstClassifierEntry->usVCID_Value); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bProtocolValid : %X\n", uiClsfrIndex, pstClassifierEntry->bProtocolValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bTOSValid : %X\n", uiClsfrIndex, pstClassifierEntry->bTOSValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bDestIpValid : %X\n", uiClsfrIndex, pstClassifierEntry->bDestIpValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bSrcIpValid : %X\n", uiClsfrIndex, pstClassifierEntry->bSrcIpValid); - - for (uiIndex = 0; uiIndex < MAX_PORT_RANGE; uiIndex++) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusSrcPortRangeLo:%X\n", pstClassifierEntry->usSrcPortRangeLo[uiIndex]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusSrcPortRangeHi:%X\n", pstClassifierEntry->usSrcPortRangeHi[uiIndex]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusDestPortRangeLo:%X\n", pstClassifierEntry->usDestPortRangeLo[uiIndex]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusDestPortRangeHi:%X\n", pstClassifierEntry->usDestPortRangeHi[uiIndex]); - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucIPSourceAddressLength : 0x%x\n", 
pstClassifierEntry->ucIPSourceAddressLength); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucIPDestinationAddressLength : 0x%x\n", pstClassifierEntry->ucIPDestinationAddressLength); - for (uiIndex = 0; uiIndex < pstClassifierEntry->ucIPSourceAddressLength; uiIndex++) { - if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulSrcIpAddr :\n"); - DumpIpv6Address(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulSrcIpMask :\n"); - DumpIpv6Address(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask); - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulSrcIpAddr:%lX\n", pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[uiIndex]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulSrcIpMask:%lX\n", pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[uiIndex]); - } - } - - for (uiIndex = 0; uiIndex < pstClassifierEntry->ucIPDestinationAddressLength; uiIndex++) { - if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulDestIpAddr :\n"); - DumpIpv6Address(pstClassifierEntry->stDestIpAddress.ulIpv6Addr); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulDestIpMask :\n"); - DumpIpv6Address(pstClassifierEntry->stDestIpAddress.ulIpv6Mask); - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulDestIpAddr:%lX\n", pstClassifierEntry->stDestIpAddress.ulIpv4Addr[uiIndex]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulDestIpMask:%lX\n", pstClassifierEntry->stDestIpAddress.ulIpv4Mask[uiIndex]); - } - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucProtocol:0x%X\n", pstClassifierEntry->ucProtocol[0]); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tu8ClassifierRulePriority:%X\n", pstClassifierEntry->u8ClassifierRulePriority); - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ulSFID:%lX\n", Adapter->PackInfo[uiLoopIndex].ulSFID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "usVCID_Value:%X\n", Adapter->PackInfo[uiLoopIndex].usVCID_Value); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "PhsEnabled: 0x%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiThreshold:%X\n", Adapter->PackInfo[uiLoopIndex].uiThreshold); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid:%X\n", Adapter->PackInfo[uiLoopIndex].bValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bActive:%X\n", Adapter->PackInfo[uiLoopIndex].bActive); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActivateReqSent: %x", Adapter->PackInfo[uiLoopIndex].bActivateRequestSent); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "u8QueueType:%X\n", Adapter->PackInfo[uiLoopIndex].u8QueueType); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxBucketSize:%X\n", Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiPerSFTxResourceCount:%X\n", atomic_read(&Adapter->PackInfo[uiLoopIndex].uiPerSFTxResourceCount)); - /* DumpDebug(DUMP_INFO,("bCSSupport:%X\n",Adapter->PackInfo[uiLoopIndex].bCSSupport)); */ - 
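
The classifier dump in this function prints IPv6 addresses through the driver's own DumpIpv6Address() helper and IPv4 words with %lX. As a side note, separate from the patch, the kernel's %pI4/%pI6 printk specifiers format network-order addresses directly; a small sketch with hypothetical names:

	#include <linux/in.h>
	#include <linux/in6.h>
	#include <linux/printk.h>
	#include <linux/types.h>

	/* hypothetical helper; both addresses are already in network byte order */
	static void demo_dump_addr(const struct in_addr *v4,
				   const struct in6_addr *v6, bool is_ipv6)
	{
		if (is_ipv6)
			pr_debug("src addr: %pI6\n", v6);		/* 16-byte buffer */
		else
			pr_debug("src addr: %pI4\n", &v4->s_addr);	/* 4-byte buffer */
	}
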
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CurrQueueDepthOnTarget: %x\n", Adapter->PackInfo[uiLoopIndex].uiCurrentQueueDepthOnTarget); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentBytesOnHost:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentBytesOnHost); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentPacketsOnHost:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentPacketsOnHost); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiDroppedCountBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiDroppedCountBytes); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiDroppedCountPackets:%X\n", Adapter->PackInfo[uiLoopIndex].uiDroppedCountPackets); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiSentBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiSentBytes); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiSentPackets:%X\n", Adapter->PackInfo[uiLoopIndex].uiSentPackets); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentDrainRate:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentDrainRate); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiThisPeriodSentBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiThisPeriodSentBytes); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "liDrainCalculated:%llX\n", Adapter->PackInfo[uiLoopIndex].liDrainCalculated); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentTokenCount:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentTokenCount); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "liLastUpdateTokenAt:%llX\n", Adapter->PackInfo[uiLoopIndex].liLastUpdateTokenAt); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxAllowedRate:%X\n", Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiPendedLast:%X\n", Adapter->PackInfo[uiLoopIndex].uiPendedLast); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "NumOfPacketsSent:%X\n", Adapter->PackInfo[uiLoopIndex].NumOfPacketsSent); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Direction: %x\n", Adapter->PackInfo[uiLoopIndex].ucDirection); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CID: %x\n", Adapter->PackInfo[uiLoopIndex].usCID); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ProtocolValid: %x\n", Adapter->PackInfo[uiLoopIndex].bProtocolValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "TOSValid: %x\n", Adapter->PackInfo[uiLoopIndex].bTOSValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "DestIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bDestIpValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SrcIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bSrcIpValid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActiveSet: %x\n", Adapter->PackInfo[uiLoopIndex].bActiveSet); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AdmittedSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAdmittedSet); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AuthzSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAuthorizedSet); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ClassifyPrority: %x\n", Adapter->PackInfo[uiLoopIndex].bClassifierPriority); - 
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxLatency: %x\n", Adapter->PackInfo[uiLoopIndex].uiMaxLatency); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, - DBG_LVL_ALL, "ServiceClassName: %*ph\n", - 4, Adapter->PackInfo[uiLoopIndex]. - ucServiceClassName); -/* BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bHeaderSuppressionEnabled :%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled); - * BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalTxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalTxBytes); - * BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalRxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalRxBytes); - * DumpDebug(DUMP_INFO,(" uiRanOutOfResCount:%X\n",Adapter->PackInfo[uiLoopIndex].uiRanOutOfResCount)); - */ - } - - for (uiLoopIndex = 0; uiLoopIndex < MIBS_MAX_HIST_ENTRIES; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Adapter->aRxPktSizeHist[%x] = %x\n", uiLoopIndex, Adapter->aRxPktSizeHist[uiLoopIndex]); - - for (uiLoopIndex = 0; uiLoopIndex < MIBS_MAX_HIST_ENTRIES; uiLoopIndex++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Adapter->aTxPktSizeHist[%x] = %x\n", uiLoopIndex, Adapter->aTxPktSizeHist[uiLoopIndex]); -} - -int reset_card_proc(struct bcm_mini_adapter *ps_adapter) -{ - int retval = STATUS_SUCCESS; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_interface_adapter *psIntfAdapter = NULL; - unsigned int value = 0, uiResetValue = 0; - int bytes; - - psIntfAdapter = ((struct bcm_interface_adapter *)(ps_adapter->pvInterfaceAdapter)); - ps_adapter->bDDRInitDone = false; - - if (ps_adapter->chip_id >= T3LPB) { - /* SYS_CFG register is write protected hence for modifying this reg value, it should be read twice before */ - rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value)); - rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value)); - - /* making bit[6...5] same as was before f/w download. this setting force the h/w to */ - /* re-populated the SP RAM area with the string descriptor. */ - value = value | (ps_adapter->syscfgBefFwDld & 0x00000060); - wrmalt(ps_adapter, SYS_CFG, &value, sizeof(value)); - } - - /* killing all submitted URBs. 
*/ - psIntfAdapter->psAdapter->StopAllXaction = TRUE; - Bcm_kill_all_URBs(psIntfAdapter); - /* Reset the UMA-B Device */ - if (ps_adapter->chip_id >= T3LPB) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Resetting UMA-B\n"); - retval = usb_reset_device(psIntfAdapter->udev); - psIntfAdapter->psAdapter->StopAllXaction = false; - - if (retval != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reset failed with ret value :%d", retval); - goto err_exit; - } - - if (ps_adapter->chip_id == BCS220_2 || - ps_adapter->chip_id == BCS220_2BC || - ps_adapter->chip_id == BCS250_BC || - ps_adapter->chip_id == BCS220_3) { - - bytes = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value)); - if (bytes < 0) { - retval = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval); - goto err_exit; - } - /* setting 0th bit */ - value |= (1<<0); - retval = wrmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval); - goto err_exit; - } - } - } else { - bytes = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value)); - if (bytes < 0) { - retval = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval); - goto err_exit; - } - value &= (~(1<<16)); - retval = wrmalt(ps_adapter, 0x0f007018, &value, sizeof(value)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval); - goto err_exit; - } - - /* Toggling the GPIO 8, 9 */ - value = 0; - retval = wrmalt(ps_adapter, GPIO_OUTPUT_REGISTER, &value, sizeof(value)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval); - goto err_exit; - } - value = 0x300; - retval = wrmalt(ps_adapter, GPIO_MODE_REGISTER, &value, sizeof(value)); - if (retval < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval); - goto err_exit; - } - mdelay(50); - } - - /* ps_adapter->downloadDDR = false; */ - if (ps_adapter->bFlashBoot) { - /* In flash boot mode MIPS state register has reverse polarity. - * So just or with setting bit 30. - * Make the MIPS in Reset state. - */ - rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &uiResetValue, sizeof(uiResetValue)); - uiResetValue |= (1<<30); - wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &uiResetValue, sizeof(uiResetValue)); - } - - if (ps_adapter->chip_id >= T3LPB) { - uiResetValue = 0; - /* - * WA for SYSConfig Issue. - * Read SYSCFG Twice to make it writable. - */ - rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue)); - if (uiResetValue & (1<<4)) { - uiResetValue = 0; - rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue)); /* 2nd read to make it writable. 
*/ - uiResetValue &= (~(1<<4)); - wrmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue)); - } - } - uiResetValue = 0; - wrmalt(ps_adapter, 0x0f01186c, &uiResetValue, sizeof(uiResetValue)); - -err_exit: - psIntfAdapter->psAdapter->StopAllXaction = false; - return retval; -} - -int run_card_proc(struct bcm_mini_adapter *ps_adapter) -{ - int status = STATUS_SUCCESS; - int bytes; - - unsigned int value = 0; - { - bytes = rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)); - if (bytes < 0) { - status = bytes; - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__); - return status; - } - - if (ps_adapter->bFlashBoot) - value &= (~(1<<30)); - else - value |= (1<<30); - - if (wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__); - return STATUS_FAILURE; - } - } - return status; -} - -int InitCardAndDownloadFirmware(struct bcm_mini_adapter *ps_adapter) -{ - int status; - unsigned int value = 0; - /* - * Create the threads first and then download the - * Firm/DDR Settings.. - */ - status = create_worker_threads(ps_adapter); - if (status < 0) - return status; - - status = bcm_parse_target_params(ps_adapter); - if (status) - return status; - - if (ps_adapter->chip_id >= T3LPB) { - rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value)); - ps_adapter->syscfgBefFwDld = value; - - if ((value & 0x60) == 0) - ps_adapter->bFlashBoot = TRUE; - } - - reset_card_proc(ps_adapter); - - /* Initializing the NVM. */ - BcmInitNVM(ps_adapter); - status = ddr_init(ps_adapter); - if (status) { - pr_err(DRV_NAME "ddr_init Failed\n"); - return status; - } - - /* Download cfg file */ - status = buffDnldVerify(ps_adapter, - (PUCHAR)ps_adapter->pstargetparams, - sizeof(struct bcm_target_params), - CONFIG_BEGIN_ADDR); - if (status) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Error downloading CFG file"); - goto OUT; - } - - if (register_networkdev(ps_adapter)) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Netdevice failed. Cleanup needs to be performed."); - return -EIO; - } - - if (false == ps_adapter->AutoFirmDld) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoFirmDld Disabled in CFG File..\n"); - /* If Auto f/w download is disable, register the control interface, */ - /* register the control interface after the mailbox. */ - if (register_control_device_interface(ps_adapter) < 0) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Control Device failed. Cleanup needs to be performed."); - return -EIO; - } - return STATUS_SUCCESS; - } - - /* - * Do the LED Settings here. It will be used by the Firmware Download - * Thread. - */ - - /* - * 1. If the LED Settings fails, do not stop and do the Firmware download. - * 2. This init would happened only if the cfg file is present, else - * call from the ioctl context. 
- */ - - status = InitLedSettings(ps_adapter); - if (status) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_PRINTK, 0, 0, "INIT LED FAILED\n"); - return status; - } - - if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { - ps_adapter->DriverState = DRIVER_INIT; - wake_up(&ps_adapter->LEDInfo.notify_led_event); - } - - if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { - ps_adapter->DriverState = FW_DOWNLOAD; - wake_up(&ps_adapter->LEDInfo.notify_led_event); - } - - value = 0; - wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value)); - wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value)); - - if (ps_adapter->eNVMType == NVM_FLASH) { - status = PropagateCalParamsFromFlashToMemory(ps_adapter); - if (status) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Propagation of Cal param failed .."); - goto OUT; - } - } - - /* Download Firmare */ - status = BcmFileDownload(ps_adapter, BIN_FILE, FIRMWARE_BEGIN_ADDR); - if (status != 0) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Firmware File is present...\n"); - goto OUT; - } - - status = run_card_proc(ps_adapter); - if (status) { - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "run_card_proc Failed\n"); - goto OUT; - } - - ps_adapter->fw_download_done = TRUE; - mdelay(10); - -OUT: - if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { - ps_adapter->DriverState = FW_DOWNLOAD_DONE; - wake_up(&ps_adapter->LEDInfo.notify_led_event); - } - - return status; -} - -static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) -{ - struct file *flp = NULL; - char *buff; - int len = 0; - - buff = kmalloc(BUFFER_1K, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - Adapter->pstargetparams = kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL); - if (Adapter->pstargetparams == NULL) { - kfree(buff); - return -ENOMEM; - } - - flp = open_firmware_file(Adapter, CFG_FILE); - if (!flp) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "NOT ABLE TO OPEN THE %s FILE\n", CFG_FILE); - kfree(buff); - kfree(Adapter->pstargetparams); - Adapter->pstargetparams = NULL; - return -ENOENT; - } - len = kernel_read(flp, 0, buff, BUFFER_1K); - filp_close(flp, NULL); - - if (len != sizeof(struct bcm_target_params)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n"); - kfree(buff); - kfree(Adapter->pstargetparams); - Adapter->pstargetparams = NULL; - return -ENOENT; - } - - /* Check for autolink in config params */ - /* - * Values in Adapter->pstargetparams are in network byte order - */ - memcpy(Adapter->pstargetparams, buff, sizeof(struct bcm_target_params)); - kfree(buff); - beceem_parse_target_struct(Adapter); - return STATUS_SUCCESS; -} - -void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter) -{ - unsigned int uiHostDrvrCfg6 = 0, uiEEPROMFlag = 0; - - if (ntohl(Adapter->pstargetparams->m_u32PhyParameter2) & AUTO_SYNC_DISABLE) { - pr_info(DRV_NAME ": AutoSyncup is Disabled\n"); - Adapter->AutoSyncup = false; - } else { - pr_info(DRV_NAME ": AutoSyncup is Enabled\n"); - Adapter->AutoSyncup = TRUE; - } - - if (ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_LINKUP_ENABLE) { - pr_info(DRV_NAME ": Enabling autolink up"); - Adapter->AutoLinkUp = TRUE; - } else { - pr_info(DRV_NAME ": Disabling autolink up"); - Adapter->AutoLinkUp = false; - } - /* Setting the DDR Setting.. 
*/ - Adapter->DDRSetting = (ntohl(Adapter->pstargetparams->HostDrvrConfig6) >> 8)&0x0F; - Adapter->ulPowerSaveMode = (ntohl(Adapter->pstargetparams->HostDrvrConfig6)>>12)&0x0F; - pr_info(DRV_NAME ": DDR Setting: %x\n", Adapter->DDRSetting); - pr_info(DRV_NAME ": Power Save Mode: %lx\n", Adapter->ulPowerSaveMode); - if (ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_FIRM_DOWNLOAD) { - pr_info(DRV_NAME ": Enabling Auto Firmware Download\n"); - Adapter->AutoFirmDld = TRUE; - } else { - pr_info(DRV_NAME ": Disabling Auto Firmware Download\n"); - Adapter->AutoFirmDld = false; - } - uiHostDrvrCfg6 = ntohl(Adapter->pstargetparams->HostDrvrConfig6); - Adapter->bMipsConfig = (uiHostDrvrCfg6>>20)&0x01; - pr_info(DRV_NAME ": MIPSConfig : 0x%X\n", Adapter->bMipsConfig); - /* used for backward compatibility. */ - Adapter->bDPLLConfig = (uiHostDrvrCfg6>>19)&0x01; - Adapter->PmuMode = (uiHostDrvrCfg6 >> 24) & 0x03; - pr_info(DRV_NAME ": PMU MODE: %x", Adapter->PmuMode); - - if ((uiHostDrvrCfg6 >> HOST_BUS_SUSPEND_BIT) & (0x01)) { - Adapter->bDoSuspend = TRUE; - pr_info(DRV_NAME ": Making DoSuspend TRUE as per configFile"); - } - - uiEEPROMFlag = ntohl(Adapter->pstargetparams->m_u32EEPROMFlag); - pr_info(DRV_NAME ": uiEEPROMFlag : 0x%X\n", uiEEPROMFlag); - Adapter->eNVMType = (enum bcm_nvm_type)((uiEEPROMFlag>>4)&0x3); - Adapter->bStatusWrite = (uiEEPROMFlag>>6)&0x1; - Adapter->uiSectorSizeInCFG = 1024*(0xFFFF & ntohl(Adapter->pstargetparams->HostDrvrConfig4)); - Adapter->bSectorSizeOverride = (bool) ((ntohl(Adapter->pstargetparams->HostDrvrConfig4))>>16)&0x1; - - if (ntohl(Adapter->pstargetparams->m_u32PowerSavingModeOptions) & 0x01) - Adapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE; - - if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) - doPowerAutoCorrection(Adapter); -} - -static void doPowerAutoCorrection(struct bcm_mini_adapter *psAdapter) -{ - unsigned int reporting_mode; - - reporting_mode = ntohl(psAdapter->pstargetparams->m_u32PowerSavingModeOptions) & 0x02; - psAdapter->bIsAutoCorrectEnabled = !((char)(psAdapter->ulPowerSaveMode >> 3) & 0x1); - - if (reporting_mode) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "can't do suspen/resume as reporting mode is enable"); - psAdapter->bDoSuspend = false; - } - - if (psAdapter->bIsAutoCorrectEnabled && (psAdapter->chip_id >= T3LPB)) { - /* If reporting mode is enable, switch PMU to PMC */ - { - psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING; - psAdapter->bDoSuspend = false; - } - - /* clearing space bit[15..12] */ - psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl((0xF << 12))); - /* placing the power save mode option */ - psAdapter->pstargetparams->HostDrvrConfig6 |= htonl((psAdapter->ulPowerSaveMode << 12)); - } else if (psAdapter->bIsAutoCorrectEnabled == false) { - /* remove the autocorrect disable bit set before dumping. 
*/ - psAdapter->ulPowerSaveMode &= ~(1 << 3); - psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl(1 << 15)); - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Using Forced User Choice: %lx\n", psAdapter->ulPowerSaveMode); - } -} - -static void convertEndian(unsigned char rwFlag, unsigned int *puiBuffer, unsigned int uiByteCount) -{ - unsigned int uiIndex = 0; - - if (RWM_WRITE == rwFlag) { - for (uiIndex = 0; uiIndex < (uiByteCount/sizeof(unsigned int)); uiIndex++) - puiBuffer[uiIndex] = htonl(puiBuffer[uiIndex]); - } else { - for (uiIndex = 0; uiIndex < (uiByteCount/sizeof(unsigned int)); uiIndex++) - puiBuffer[uiIndex] = ntohl(puiBuffer[uiIndex]); - } -} - -int rdm(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize) -{ - return Adapter->interface_rdm(Adapter->pvInterfaceAdapter, - uiAddress, pucBuff, sSize); -} - -int wrm(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize) -{ - int iRetVal; - - iRetVal = Adapter->interface_wrm(Adapter->pvInterfaceAdapter, - uiAddress, pucBuff, sSize); - return iRetVal; -} - -int wrmalt(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size) -{ - convertEndian(RWM_WRITE, pucBuff, size); - return wrm(Adapter, uiAddress, (PUCHAR)pucBuff, size); -} - -int rdmalt(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size) -{ - int uiRetVal = 0; - - uiRetVal = rdm(Adapter, uiAddress, (PUCHAR)pucBuff, size); - convertEndian(RWM_READ, (unsigned int *)pucBuff, size); - - return uiRetVal; -} - -int wrmWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize) -{ - int status = STATUS_SUCCESS; - - down(&Adapter->rdmwrmsync); - - if ((Adapter->IdleMode == TRUE) || - (Adapter->bShutStatus == TRUE) || - (Adapter->bPreparingForLowPowerMode == TRUE)) { - - status = -EACCES; - goto exit; - } - - status = wrm(Adapter, uiAddress, pucBuff, sSize); -exit: - up(&Adapter->rdmwrmsync); - return status; -} - -int wrmaltWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size) -{ - int iRetVal = STATUS_SUCCESS; - - down(&Adapter->rdmwrmsync); - - if ((Adapter->IdleMode == TRUE) || - (Adapter->bShutStatus == TRUE) || - (Adapter->bPreparingForLowPowerMode == TRUE)) { - - iRetVal = -EACCES; - goto exit; - } - - iRetVal = wrmalt(Adapter, uiAddress, pucBuff, size); -exit: - up(&Adapter->rdmwrmsync); - return iRetVal; -} - -int rdmaltWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size) -{ - int uiRetVal = STATUS_SUCCESS; - - down(&Adapter->rdmwrmsync); - if ((Adapter->IdleMode == TRUE) || - (Adapter->bShutStatus == TRUE) || - (Adapter->bPreparingForLowPowerMode == TRUE)) { - - uiRetVal = -EACCES; - goto exit; - } - - uiRetVal = rdmalt(Adapter, uiAddress, pucBuff, size); -exit: - up(&Adapter->rdmwrmsync); - return uiRetVal; -} - -static void HandleShutDownModeWakeup(struct bcm_mini_adapter *Adapter) -{ - int clear_abort_pattern = 0, Status = 0; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n"); - /* target has woken up From Shut Down */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Clearing Shut Down Software abort pattern\n"); - Status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, (unsigned int *)&clear_abort_pattern, sizeof(clear_abort_pattern)); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "WRM to 
SW_ABORT_IDLEMODE_LOC failed with err:%d", Status); - return; - } - - if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) { - msleep(100); - InterfaceHandleShutdownModeWakeup(Adapter); - msleep(100); - } - - if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { - Adapter->DriverState = NO_NETWORK_ENTRY; - wake_up(&Adapter->LEDInfo.notify_led_event); - } - - Adapter->bTriedToWakeUpFromlowPowerMode = false; - Adapter->bShutStatus = false; - wake_up(&Adapter->lowpower_mode_wait_queue); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n"); -} - -static void SendShutModeResponse(struct bcm_mini_adapter *Adapter) -{ - struct bcm_link_request stShutdownResponse; - unsigned int NVMAccess = 0, lowPwrAbortMsg = 0; - unsigned int Status = 0; - - memset(&stShutdownResponse, 0, sizeof(struct bcm_link_request)); - stShutdownResponse.Leader.Status = LINK_UP_CONTROL_REQ; - stShutdownResponse.Leader.PLength = 8; /* 8 bytes; */ - stShutdownResponse.szData[0] = LINK_UP_ACK; - stShutdownResponse.szData[1] = LINK_SHUTDOWN_REQ_FROM_FIRMWARE; - - /********************************* - * down_trylock - - * if [ semaphore is available ] - * acquire semaphone and return value 0 ; - * else - * return non-zero value ; - * - ***********************************/ - - NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock); - lowPwrAbortMsg = down_trylock(&Adapter->LowPowerModeSync); - - if (NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) { - if (!NVMAccess) - up(&Adapter->NVMRdmWrmLock); - - if (!lowPwrAbortMsg) - up(&Adapter->LowPowerModeSync); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Device Access is going on NACK the Shut Down MODE\n"); - stShutdownResponse.szData[2] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK- device access is going on. */ - Adapter->bPreparingForLowPowerMode = false; - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Sending SHUTDOWN MODE ACK\n"); - stShutdownResponse.szData[2] = SHUTDOWN_ACK_FROM_DRIVER; /* ShutDown ACK */ - - /* Wait for the LED to TURN OFF before sending ACK response */ - if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { - int iRetVal = 0; - - /* Wake the LED Thread with LOWPOWER_MODE_ENTER State */ - Adapter->DriverState = LOWPOWER_MODE_ENTER; - wake_up(&Adapter->LEDInfo.notify_led_event); - - /* Wait for 1 SEC for LED to OFF */ - iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000)); - - /* If Timed Out to Sync IDLE MODE Enter, do IDLE mode Exit and Send NACK to device */ - if (iRetVal <= 0) { - stShutdownResponse.szData[1] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK- device access is going on. */ - Adapter->DriverState = NO_NETWORK_ENTRY; - wake_up(&Adapter->LEDInfo.notify_led_event); - } - } - - if (stShutdownResponse.szData[2] == SHUTDOWN_ACK_FROM_DRIVER) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "ACKING SHUTDOWN MODE !!!!!!!!!"); - down(&Adapter->rdmwrmsync); - Adapter->bPreparingForLowPowerMode = TRUE; - up(&Adapter->rdmwrmsync); - /* Killing all URBS. 
*/ - if (Adapter->bDoSuspend == TRUE) - Bcm_kill_all_URBs((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter)); - } else { - Adapter->bPreparingForLowPowerMode = false; - } - - if (!NVMAccess) - up(&Adapter->NVMRdmWrmLock); - - if (!lowPwrAbortMsg) - up(&Adapter->LowPowerModeSync); - } - - Status = CopyBufferToControlPacket(Adapter, &stShutdownResponse); - if (Status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "fail to send the Idle mode Request\n"); - Adapter->bPreparingForLowPowerMode = false; - StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter)); - } -} - -static void HandleShutDownModeRequest(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer) -{ - unsigned int uiResetValue = 0; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n"); - - if (*(pucBuffer+1) == COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW) { - HandleShutDownModeWakeup(Adapter); - } else if (*(pucBuffer+1) == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) { - /* Target wants to go to Shut Down Mode */ - /* InterfacePrepareForShutdown(Adapter); */ - if (Adapter->chip_id == BCS220_2 || - Adapter->chip_id == BCS220_2BC || - Adapter->chip_id == BCS250_BC || - Adapter->chip_id == BCS220_3) { - - rdmalt(Adapter, HPM_CONFIG_MSW, &uiResetValue, 4); - uiResetValue |= (1<<17); - wrmalt(Adapter, HPM_CONFIG_MSW, &uiResetValue, 4); - } - - SendShutModeResponse(Adapter); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "ShutDownModeResponse:Notification received: Sending the response(Ack/Nack)\n"); - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n"); -} - -void ResetCounters(struct bcm_mini_adapter *Adapter) -{ - beceem_protocol_reset(Adapter); - Adapter->CurrNumRecvDescs = 0; - Adapter->PrevNumRecvDescs = 0; - Adapter->LinkUpStatus = 0; - Adapter->LinkStatus = 0; - atomic_set(&Adapter->cntrlpktCnt, 0); - atomic_set(&Adapter->TotalPacketCount, 0); - Adapter->fw_download_done = false; - Adapter->LinkStatus = 0; - Adapter->AutoLinkUp = false; - Adapter->IdleMode = false; - Adapter->bShutStatus = false; -} - -struct bcm_classifier_rule *GetFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIP) -{ - unsigned int uiIndex = 0; - - for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) { - if ((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) && - (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification) && - (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress == SrcIP) && - !Adapter->astFragmentedPktClassifierTable[uiIndex].bOutOfOrderFragment) - - return Adapter->astFragmentedPktClassifierTable[uiIndex].pstMatchedClassifierEntry; - } - return NULL; -} - -void AddFragIPClsEntry(struct bcm_mini_adapter *Adapter, struct bcm_fragmented_packet_info *psFragPktInfo) -{ - unsigned int uiIndex = 0; - - for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) { - if (!Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) { - memcpy(&Adapter->astFragmentedPktClassifierTable[uiIndex], psFragPktInfo, sizeof(struct bcm_fragmented_packet_info)); - break; - } - } -} - -void DelFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIp) -{ - unsigned int uiIndex = 0; - - for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) { - if ((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) && - 
(Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification) && - (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress == SrcIp)) - - memset(&Adapter->astFragmentedPktClassifierTable[uiIndex], 0, sizeof(struct bcm_fragmented_packet_info)); - } -} - -void update_per_cid_rx(struct bcm_mini_adapter *Adapter) -{ - unsigned int qindex = 0; - - if ((jiffies - Adapter->liDrainCalculated) < XSECONDS) - return; - - for (qindex = 0; qindex < HiPriority; qindex++) { - if (Adapter->PackInfo[qindex].ucDirection == 0) { - Adapter->PackInfo[qindex].uiCurrentRxRate = - (Adapter->PackInfo[qindex].uiCurrentRxRate + - Adapter->PackInfo[qindex].uiThisPeriodRxBytes) / 2; - - Adapter->PackInfo[qindex].uiThisPeriodRxBytes = 0; - } else { - Adapter->PackInfo[qindex].uiCurrentDrainRate = - (Adapter->PackInfo[qindex].uiCurrentDrainRate + - Adapter->PackInfo[qindex].uiThisPeriodSentBytes) / 2; - Adapter->PackInfo[qindex].uiThisPeriodSentBytes = 0; - } - } - Adapter->liDrainCalculated = jiffies; -} - -void update_per_sf_desc_cnts(struct bcm_mini_adapter *Adapter) -{ - int iIndex = 0; - u32 uibuff[MAX_TARGET_DSX_BUFFERS]; - int bytes; - - if (!atomic_read(&Adapter->uiMBupdate)) - return; - - bytes = rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (unsigned int *)uibuff, sizeof(unsigned int) * MAX_TARGET_DSX_BUFFERS); - if (bytes < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed\n"); - return; - } - - for (iIndex = 0; iIndex < HiPriority; iIndex++) { - if (Adapter->PackInfo[iIndex].bValid && Adapter->PackInfo[iIndex].ucDirection) { - if (Adapter->PackInfo[iIndex].usVCID_Value < MAX_TARGET_DSX_BUFFERS) - atomic_set(&Adapter->PackInfo[iIndex].uiPerSFTxResourceCount, uibuff[Adapter->PackInfo[iIndex].usVCID_Value]); - else - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid VCID : %x\n", Adapter->PackInfo[iIndex].usVCID_Value); - } - } - atomic_set(&Adapter->uiMBupdate, false); -} - -void flush_queue(struct bcm_mini_adapter *Adapter, unsigned int iQIndex) -{ - struct sk_buff *PacketToDrop = NULL; - struct net_device_stats *netstats = &Adapter->dev->stats; - - spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock); - - while (Adapter->PackInfo[iQIndex].FirstTxQueue && atomic_read(&Adapter->TotalPacketCount)) { - PacketToDrop = Adapter->PackInfo[iQIndex].FirstTxQueue; - if (PacketToDrop && PacketToDrop->len) { - netstats->tx_dropped++; - DEQUEUEPACKET(Adapter->PackInfo[iQIndex].FirstTxQueue, Adapter->PackInfo[iQIndex].LastTxQueue); - Adapter->PackInfo[iQIndex].uiCurrentPacketsOnHost--; - Adapter->PackInfo[iQIndex].uiCurrentBytesOnHost -= PacketToDrop->len; - - /* Adding dropped statistics */ - Adapter->PackInfo[iQIndex].uiDroppedCountBytes += PacketToDrop->len; - Adapter->PackInfo[iQIndex].uiDroppedCountPackets++; - dev_kfree_skb(PacketToDrop); - atomic_dec(&Adapter->TotalPacketCount); - } - } - spin_unlock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock); -} - -static void beceem_protocol_reset(struct bcm_mini_adapter *Adapter) -{ - int i; - - if (netif_msg_link(Adapter)) - pr_notice(PFX "%s: protocol reset\n", Adapter->dev->name); - - netif_carrier_off(Adapter->dev); - netif_stop_queue(Adapter->dev); - - Adapter->IdleMode = false; - Adapter->LinkUpStatus = false; - ClearTargetDSXBuffer(Adapter, 0, TRUE); - /* Delete All Classifier Rules */ - - for (i = 0; i < HiPriority; i++) - DeleteAllClassifiersForSF(Adapter, i); - - flush_all_queues(Adapter); - - if (Adapter->TimerActive == TRUE) - Adapter->TimerActive = false; - - 
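	/* Clear the fragmented-IP classifier table so no stale reassembly state survives the protocol reset. */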
memset(Adapter->astFragmentedPktClassifierTable, 0, sizeof(struct bcm_fragmented_packet_info) * MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES); - - for (i = 0; i < HiPriority; i++) { - /* resetting only the first size (S_MIBS_SERVICEFLOW_TABLE) for the SF. */ - /* It is same between MIBs and SF. */ - memset(&Adapter->PackInfo[i].stMibsExtServiceFlowTable, 0, sizeof(struct bcm_mibs_parameters)); - } -} diff --git a/drivers/staging/bcm/PHSDefines.h b/drivers/staging/bcm/PHSDefines.h deleted file mode 100644 index cd78ee4ffa22..000000000000 --- a/drivers/staging/bcm/PHSDefines.h +++ /dev/null @@ -1,94 +0,0 @@ -#ifndef BCM_PHS_DEFINES_H -#define BCM_PHS_DEFINES_H - -#define PHS_INVALID_TABLE_INDEX 0xffffffff -#define PHS_MEM_TAG "_SHP" - -/* PHS Defines */ -#define STATUS_PHS_COMPRESSED 0xa1 -#define STATUS_PHS_NOCOMPRESSION 0xa2 -#define APPLY_PHS 1 -#define MAX_NO_BIT 7 -#define ZERO_PHSI 0 -#define VERIFY 0 -#define SIZE_MULTIPLE_32 4 -#define UNCOMPRESSED_PACKET 0 -#define DYNAMIC 0 -#define SUPPRESS 0x80 -#define NO_CLASSIFIER_MATCH 0 -#define SEND_PACKET_UNCOMPRESSED 0 -#define PHSI_IS_ZERO 0 -#define PHSI_LEN 1 -#define ERROR_LEN 0 -#define PHS_BUFFER_SIZE 1532 -#define MAX_PHSRULE_PER_SF 20 -#define MAX_SERVICEFLOWS 17 - -/* PHS Error Defines */ -#define PHS_SUCCESS 0 -#define ERR_PHS_INVALID_DEVICE_EXETENSION 0x800 -#define ERR_PHS_INVALID_PHS_RULE 0x801 -#define ERR_PHS_RULE_ALREADY_EXISTS 0x802 -#define ERR_SF_MATCH_FAIL 0x803 -#define ERR_INVALID_CLASSIFIERTABLE_FOR_SF 0x804 -#define ERR_SFTABLE_FULL 0x805 -#define ERR_CLSASSIFIER_TABLE_FULL 0x806 -#define ERR_PHSRULE_MEMALLOC_FAIL 0x807 -#define ERR_CLSID_MATCH_FAIL 0x808 -#define ERR_PHSRULE_MATCH_FAIL 0x809 - -struct bcm_phs_rule { - u8 u8PHSI; - u8 u8PHSFLength; - u8 u8PHSF[MAX_PHS_LENGTHS]; - u8 u8PHSMLength; - u8 u8PHSM[MAX_PHS_LENGTHS]; - u8 u8PHSS; - u8 u8PHSV; - u8 u8RefCnt; - u8 bUnclassifiedPHSRule; - u8 u8Reserved[3]; - long PHSModifiedBytes; - unsigned long PHSModifiedNumPackets; - unsigned long PHSErrorNumPackets; -}; - -enum bcm_phs_classifier_context { - eActiveClassifierRuleContext, - eOldClassifierRuleContext -}; - -struct bcm_phs_classifier_entry { - u8 bUsed; - u16 uiClassifierRuleId; - u8 u8PHSI; - struct bcm_phs_rule *pstPhsRule; - u8 bUnclassifiedPHSRule; -}; - -struct bcm_phs_classifier_table { - u16 uiTotalClassifiers; - struct bcm_phs_classifier_entry stActivePhsRulesList[MAX_PHSRULE_PER_SF]; - struct bcm_phs_classifier_entry stOldPhsRulesList[MAX_PHSRULE_PER_SF]; - u16 uiOldestPhsRuleIndex; -}; - -struct bcm_phs_entry { - u8 bUsed; - u16 uiVcid; - struct bcm_phs_classifier_table *pstClassifierTable; -}; - -struct bcm_phs_table { - u16 uiTotalServiceFlows; - struct bcm_phs_entry stSFList[MAX_SERVICEFLOWS]; -}; - -struct bcm_phs_extension { - /* PHS Specific data */ - struct bcm_phs_table *pstServiceFlowPhsRulesTable; - void *CompressedTxBuffer; - void *UnCompressedRxBuffer; -}; - -#endif diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c deleted file mode 100644 index 262613436d1e..000000000000 --- a/drivers/staging/bcm/PHSModule.c +++ /dev/null @@ -1,1699 +0,0 @@ -#include "headers.h" - -static UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid, - B_UINT16 uiClsId, - struct bcm_phs_table *psServiceFlowTable, - struct bcm_phs_rule *psPhsRule, - B_UINT8 u8AssociatedPHSI); - -static UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid, - B_UINT16 uiClsId, - struct bcm_phs_entry *pstServiceFlowEntry, - struct bcm_phs_rule *psPhsRule, - B_UINT8 u8AssociatedPHSI); - -static UINT 
CreateClassifierPHSRule(B_UINT16 uiClsId, - struct bcm_phs_classifier_table *psaClassifiertable, - struct bcm_phs_rule *psPhsRule, - enum bcm_phs_classifier_context eClsContext, - B_UINT8 u8AssociatedPHSI); - -static UINT UpdateClassifierPHSRule(B_UINT16 uiClsId, - struct bcm_phs_classifier_entry *pstClassifierEntry, - struct bcm_phs_classifier_table *psaClassifiertable, - struct bcm_phs_rule *psPhsRule, - B_UINT8 u8AssociatedPHSI); - -static bool ValidatePHSRuleComplete(const struct bcm_phs_rule *psPhsRule); - -static bool DerefPhsRule(B_UINT16 uiClsId, - struct bcm_phs_classifier_table *psaClassifiertable, - struct bcm_phs_rule *pstPhsRule); - -static UINT GetClassifierEntry(struct bcm_phs_classifier_table *pstClassifierTable, - B_UINT32 uiClsid, - enum bcm_phs_classifier_context eClsContext, - struct bcm_phs_classifier_entry **ppstClassifierEntry); - -static UINT GetPhsRuleEntry(struct bcm_phs_classifier_table *pstClassifierTable, - B_UINT32 uiPHSI, - enum bcm_phs_classifier_context eClsContext, - struct bcm_phs_rule **ppstPhsRule); - -static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable); - -static int phs_compress(struct bcm_phs_rule *phs_members, - unsigned char *in_buf, - unsigned char *out_buf, - unsigned int *header_size, - UINT *new_header_size); - -static int verify_suppress_phsf(unsigned char *in_buffer, - unsigned char *out_buffer, - unsigned char *phsf, - unsigned char *phsm, - unsigned int phss, - unsigned int phsv, - UINT *new_header_size); - -static int phs_decompress(unsigned char *in_buf, - unsigned char *out_buf, - struct bcm_phs_rule *phs_rules, - UINT *header_size); - -static ULONG PhsCompress(void *pvContext, - B_UINT16 uiVcid, - B_UINT16 uiClsId, - void *pvInputBuffer, - void *pvOutputBuffer, - UINT *pOldHeaderSize, - UINT *pNewHeaderSize); - -static ULONG PhsDeCompress(void *pvContext, - B_UINT16 uiVcid, - void *pvInputBuffer, - void *pvOutputBuffer, - UINT *pInHeaderSize, - UINT *pOutHeaderSize); - -#define IN -#define OUT - -/* - * Function: PHSTransmit - * Description: This routine handle PHS(Payload Header Suppression for Tx path. - * It extracts a fragment of the NDIS_PACKET containing the header - * to be suppressed. It then suppresses the header by invoking PHS exported compress routine. - * The header data after suppression is copied back to the NDIS_PACKET. - * - * Input parameters: IN struct bcm_mini_adapter *Adapter - Miniport Adapter Context - * IN Packet - NDIS packet containing data to be transmitted - * IN USHORT Vcid - vcid pertaining to connection on which the packet is being sent.Used to - * identify PHS rule to be applied. - * B_UINT16 uiClassifierRuleID - Classifier Rule ID - * BOOLEAN bHeaderSuppressionEnabled - indicates if header suprression is enabled for SF. - * - * Return: STATUS_SUCCESS - If the send was successful. - * Other - If an error occurred. 
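 * Note: as implemented below, one PHSI byte always ends up in front of the
 * (possibly suppressed) higher-layer header; a PHSI of 0 tells the receive
 * path that the header was sent uncompressed.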
- */ - -int PHSTransmit(struct bcm_mini_adapter *Adapter, - struct sk_buff **pPacket, - USHORT Vcid, - B_UINT16 uiClassifierRuleID, - bool bHeaderSuppressionEnabled, - UINT *PacketLen, - UCHAR bEthCSSupport) -{ - /* PHS Sepcific */ - UINT unPHSPktHdrBytesCopied = 0; - UINT unPhsOldHdrSize = 0; - UINT unPHSNewPktHeaderLen = 0; - /* Pointer to PHS IN Hdr Buffer */ - PUCHAR pucPHSPktHdrInBuf = - Adapter->stPhsTxContextInfo.ucaHdrSuppressionInBuf; - /* Pointer to PHS OUT Hdr Buffer */ - PUCHAR pucPHSPktHdrOutBuf = - Adapter->stPhsTxContextInfo.ucaHdrSuppressionOutBuf; - UINT usPacketType; - UINT BytesToRemove = 0; - bool bPHSI = 0; - LONG ulPhsStatus = 0; - UINT numBytesCompressed = 0; - struct sk_buff *newPacket = NULL; - struct sk_buff *Packet = *pPacket; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "In PHSTransmit"); - - if (!bEthCSSupport) - BytesToRemove = ETH_HLEN; - /* - * Accumulate the header upto the size we support suppression - * from NDIS packet - */ - - usPacketType = ((struct ethhdr *)(Packet->data))->h_proto; - - pucPHSPktHdrInBuf = Packet->data + BytesToRemove; - /* considering data after ethernet header */ - if ((*PacketLen - BytesToRemove) < MAX_PHS_LENGTHS) - unPHSPktHdrBytesCopied = (*PacketLen - BytesToRemove); - else - unPHSPktHdrBytesCopied = MAX_PHS_LENGTHS; - - if ((unPHSPktHdrBytesCopied > 0) && - (unPHSPktHdrBytesCopied <= MAX_PHS_LENGTHS)) { - - /* - * Step 2 Suppress Header using PHS and fill into intermediate - * ucaPHSPktHdrOutBuf. - * Suppress only if IP Header and PHS Enabled For the - * Service Flow - */ - if (((usPacketType == ETHERNET_FRAMETYPE_IPV4) || - (usPacketType == ETHERNET_FRAMETYPE_IPV6)) && - (bHeaderSuppressionEnabled)) { - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, - DBG_LVL_ALL, - "\nTrying to PHS Compress Using Classifier rule 0x%X", - uiClassifierRuleID); - unPHSNewPktHeaderLen = unPHSPktHdrBytesCopied; - ulPhsStatus = PhsCompress(&Adapter->stBCMPhsContext, - Vcid, - uiClassifierRuleID, - pucPHSPktHdrInBuf, - pucPHSPktHdrOutBuf, - &unPhsOldHdrSize, - &unPHSNewPktHeaderLen); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, - DBG_LVL_ALL, - "\nPHS Old header Size : %d New Header Size %d\n", - unPhsOldHdrSize, unPHSNewPktHeaderLen); - - if (unPHSNewPktHeaderLen == unPhsOldHdrSize) { - - if (ulPhsStatus == STATUS_PHS_COMPRESSED) - bPHSI = *pucPHSPktHdrOutBuf; - - ulPhsStatus = STATUS_PHS_NOCOMPRESSION; - } - - if (ulPhsStatus == STATUS_PHS_COMPRESSED) { - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - PHS_SEND, DBG_LVL_ALL, - "PHS Sending packet Compressed"); - - if (skb_cloned(Packet)) { - newPacket = - skb_copy(Packet, GFP_ATOMIC); - - if (newPacket == NULL) - return STATUS_FAILURE; - - dev_kfree_skb(Packet); - *pPacket = Packet = newPacket; - pucPHSPktHdrInBuf = - Packet->data + BytesToRemove; - } - - numBytesCompressed = unPhsOldHdrSize - - (unPHSNewPktHeaderLen + PHSI_LEN); - - memcpy(pucPHSPktHdrInBuf + numBytesCompressed, - pucPHSPktHdrOutBuf, - unPHSNewPktHeaderLen + PHSI_LEN); - memcpy(Packet->data + numBytesCompressed, - Packet->data, BytesToRemove); - skb_pull(Packet, numBytesCompressed); - - return STATUS_SUCCESS; - } else { - /* if one byte headroom is not available, - * increase it through skb_cow - */ - if (!(skb_headroom(Packet) > 0)) { - - if (skb_cow(Packet, 1)) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_PRINTK, - 0, 0, - "SKB Cow Failed\n"); - return STATUS_FAILURE; - } - } - skb_push(Packet, 1); - - /* - * CAUTION: The MAC Header is getting corrupted - * here for IP CS - can be saved by 
copying 14 - * Bytes. not needed .... hence corrupting it. - */ - *(Packet->data + BytesToRemove) = bPHSI; - return STATUS_SUCCESS; - } - } else { - - if (!bHeaderSuppressionEnabled) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - PHS_SEND, DBG_LVL_ALL, - "\nHeader Suppression Disabled For SF: No PHS\n"); - - return STATUS_SUCCESS; - } - } - - /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - * "PHSTransmit : Dumping data packet After PHS"); */ - return STATUS_SUCCESS; -} - -int PHSReceive(struct bcm_mini_adapter *Adapter, - USHORT usVcid, - struct sk_buff *packet, - UINT *punPacketLen, - UCHAR *pucEthernetHdr, - UINT bHeaderSuppressionEnabled) -{ - u32 nStandardPktHdrLen = 0; - u32 nTotalsuppressedPktHdrBytes = 0; - int ulPhsStatus = 0; - PUCHAR pucInBuff = NULL; - UINT TotalBytesAdded = 0; - - if (!bHeaderSuppressionEnabled) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, - DBG_LVL_ALL, - "\nPhs Disabled for incoming packet"); - return ulPhsStatus; - } - - pucInBuff = packet->data; - - /* Restore PHS suppressed header */ - nStandardPktHdrLen = packet->len; - ulPhsStatus = PhsDeCompress(&Adapter->stBCMPhsContext, - usVcid, - pucInBuff, - Adapter->ucaPHSPktRestoreBuf, - &nTotalsuppressedPktHdrBytes, - &nStandardPktHdrLen); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, - "\nSuppressed PktHdrLen : 0x%x Restored PktHdrLen : 0x%x", - nTotalsuppressedPktHdrBytes, nStandardPktHdrLen); - - if (ulPhsStatus != STATUS_PHS_COMPRESSED) { - skb_pull(packet, 1); - return STATUS_SUCCESS; - } else { - TotalBytesAdded = nStandardPktHdrLen - - nTotalsuppressedPktHdrBytes - PHSI_LEN; - - if (TotalBytesAdded) { - if (skb_headroom(packet) >= (SKB_RESERVE_ETHERNET_HEADER + TotalBytesAdded)) - skb_push(packet, TotalBytesAdded); - else { - if (skb_cow(packet, skb_headroom(packet) + TotalBytesAdded)) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_PRINTK, 0, 0, - "cow failed in receive\n"); - return STATUS_FAILURE; - } - - skb_push(packet, TotalBytesAdded); - } - } - - memcpy(packet->data, Adapter->ucaPHSPktRestoreBuf, - nStandardPktHdrLen); - } - - return STATUS_SUCCESS; -} - -void DumpFullPacket(UCHAR *pBuf, UINT nPktLen) -{ - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, - "Dumping Data Packet"); - BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, - pBuf, nPktLen); -} - -/* - * Procedure: phs_init - * - * Description: This routine is responsible for allocating memory for classifier - * and PHS rules. - * - * Arguments: - * pPhsdeviceExtension - ptr to Device extension containing PHS Classifier rules - * and PHS Rules , RX, TX buffer etc - * - * Returns: - * TRUE(1) -If allocation of memory was successful. - * FALSE -If allocation of memory fails. 
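 * Allocates the top-level service flow table, a classifier table for each
 * service flow slot, and the two PHS_BUFFER_SIZE scratch buffers used for
 * compression on transmit and restoration on receive.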
- */ -int phs_init(struct bcm_phs_extension *pPhsdeviceExtension, - struct bcm_mini_adapter *Adapter) -{ - int i; - struct bcm_phs_table *pstServiceFlowTable; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "\nPHS:phs_init function"); - - if (pPhsdeviceExtension->pstServiceFlowPhsRulesTable) - return -EINVAL; - - pPhsdeviceExtension->pstServiceFlowPhsRulesTable = - kzalloc(sizeof(struct bcm_phs_table), GFP_KERNEL); - - if (!pPhsdeviceExtension->pstServiceFlowPhsRulesTable) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, - "\nAllocation ServiceFlowPhsRulesTable failed"); - return -ENOMEM; - } - - pstServiceFlowTable = pPhsdeviceExtension->pstServiceFlowPhsRulesTable; - for (i = 0; i < MAX_SERVICEFLOWS; i++) { - struct bcm_phs_entry sServiceFlow = - pstServiceFlowTable->stSFList[i]; - sServiceFlow.pstClassifierTable = - kzalloc(sizeof(struct bcm_phs_classifier_table), - GFP_KERNEL); - if (!sServiceFlow.pstClassifierTable) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, "\nAllocation failed"); - free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable); - pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL; - return -ENOMEM; - } - } - - pPhsdeviceExtension->CompressedTxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL); - if (pPhsdeviceExtension->CompressedTxBuffer == NULL) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, "\nAllocation failed"); - free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable); - pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL; - return -ENOMEM; - } - - pPhsdeviceExtension->UnCompressedRxBuffer = - kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL); - if (pPhsdeviceExtension->UnCompressedRxBuffer == NULL) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, "\nAllocation failed"); - kfree(pPhsdeviceExtension->CompressedTxBuffer); - free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable); - pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL; - return -ENOMEM; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "\n phs_init Successful"); - return STATUS_SUCCESS; -} - -int PhsCleanup(IN struct bcm_phs_extension *pPHSDeviceExt) -{ - if (pPHSDeviceExt->pstServiceFlowPhsRulesTable) { - free_phs_serviceflow_rules(pPHSDeviceExt->pstServiceFlowPhsRulesTable); - pPHSDeviceExt->pstServiceFlowPhsRulesTable = NULL; - } - - kfree(pPHSDeviceExt->CompressedTxBuffer); - pPHSDeviceExt->CompressedTxBuffer = NULL; - - kfree(pPHSDeviceExt->UnCompressedRxBuffer); - pPHSDeviceExt->UnCompressedRxBuffer = NULL; - - return 0; -} - -/* - * PHS functions - * PhsUpdateClassifierRule - * - * Routine Description: - * Exported function to add or modify a PHS Rule. - * - * Arguments: - * IN void* pvContext - PHS Driver Specific Context - * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies - * IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies. - * IN struct bcm_phs_rule *psPhsRule - The PHS Rule strcuture to be added to the PHS Rule table. - * - * Return Value: - * - * 0 if successful, - * >0 Error. 
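 * If no service flow entry exists yet for uiVcid, a new mapping is created;
 * otherwise the rule is attached to the classifier table of the existing flow.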
- */ -ULONG PhsUpdateClassifierRule(IN void *pvContext, - IN B_UINT16 uiVcid , - IN B_UINT16 uiClsId , - IN struct bcm_phs_rule *psPhsRule, - IN B_UINT8 u8AssociatedPHSI) -{ - ULONG lStatus = 0; - UINT nSFIndex = 0; - struct bcm_phs_entry *pstServiceFlowEntry = NULL; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_phs_extension *pDeviceExtension = - (struct bcm_phs_extension *)pvContext; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "PHS With Corr2 Changes\n"); - - if (pDeviceExtension == NULL) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, "Invalid Device Extension\n"); - return ERR_PHS_INVALID_DEVICE_EXETENSION; - } - - if (u8AssociatedPHSI == 0) - return ERR_PHS_INVALID_PHS_RULE; - - /* Retrieve the SFID Entry Index for requested Service Flow */ - nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable, - uiVcid, &pstServiceFlowEntry); - - if (nSFIndex == PHS_INVALID_TABLE_INDEX) { - /* This is a new SF. Create a mapping entry for this */ - lStatus = CreateSFToClassifierRuleMapping(uiVcid, uiClsId, - pDeviceExtension->pstServiceFlowPhsRulesTable, - psPhsRule, - u8AssociatedPHSI); - return lStatus; - } - - /* SF already Exists Add PHS Rule to existing SF */ - lStatus = CreateClassiferToPHSRuleMapping(uiVcid, uiClsId, - pstServiceFlowEntry, - psPhsRule, - u8AssociatedPHSI); - - return lStatus; -} - -/* - * PhsDeletePHSRule - * - * Routine Description: - * Deletes the specified phs Rule within Vcid - * - * Arguments: - * IN void* pvContext - PHS Driver Specific Context - * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies - * IN B_UINT8 u8PHSI - the PHS Index identifying PHS rule to be deleted. - * - * Return Value: - * - * 0 if successful, - * >0 Error. - */ -ULONG PhsDeletePHSRule(IN void *pvContext, - IN B_UINT16 uiVcid, - IN B_UINT8 u8PHSI) -{ - UINT nSFIndex = 0, nClsidIndex = 0; - struct bcm_phs_entry *pstServiceFlowEntry = NULL; - struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext; - struct bcm_phs_classifier_entry *curr_entry; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "======>\n"); - - if (pDeviceExtension) { - /* Retrieve the SFID Entry Index for requested Service Flow */ - nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable, - uiVcid, &pstServiceFlowEntry); - - if (nSFIndex == PHS_INVALID_TABLE_INDEX) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, "SFID Match Failed\n"); - return ERR_SF_MATCH_FAIL; - } - - pstClassifierRulesTable = pstServiceFlowEntry->pstClassifierTable; - if (pstClassifierRulesTable) { - for (nClsidIndex = 0; nClsidIndex < MAX_PHSRULE_PER_SF; nClsidIndex++) { - curr_entry = &pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]; - if (curr_entry->bUsed && - curr_entry->pstPhsRule && - (curr_entry->pstPhsRule->u8PHSI == u8PHSI)) { - - if (curr_entry->pstPhsRule->u8RefCnt) - curr_entry->pstPhsRule->u8RefCnt--; - - if (0 == curr_entry->pstPhsRule->u8RefCnt) - kfree(curr_entry->pstPhsRule); - - memset(curr_entry, - 0, - sizeof(struct bcm_phs_classifier_entry)); - } - } - } - } - return 0; -} - -/* - * PhsDeleteClassifierRule - * - * Routine Description: - * Exported function to Delete a PHS Rule for the SFID,CLSID Pair. 
- * - * Arguments: - * IN void* pvContext - PHS Driver Specific Context - * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies - * IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies. - * - * Return Value: - * - * 0 if successful, - * >0 Error. - */ -ULONG PhsDeleteClassifierRule(IN void *pvContext, - IN B_UINT16 uiVcid, - IN B_UINT16 uiClsId) -{ - UINT nSFIndex = 0, nClsidIndex = 0; - struct bcm_phs_entry *pstServiceFlowEntry = NULL; - struct bcm_phs_classifier_entry *pstClassifierEntry = NULL; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_phs_extension *pDeviceExtension = - (struct bcm_phs_extension *)pvContext; - - if (!pDeviceExtension) - goto out; - - /* Retrieve the SFID Entry Index for requested Service Flow */ - nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable, - uiVcid, &pstServiceFlowEntry); - if (nSFIndex == PHS_INVALID_TABLE_INDEX) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, "SFID Match Failed\n"); - return ERR_SF_MATCH_FAIL; - } - - nClsidIndex = - GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable, - uiClsId, - eActiveClassifierRuleContext, - &pstClassifierEntry); - - if ((nClsidIndex != PHS_INVALID_TABLE_INDEX) && - (!pstClassifierEntry->bUnclassifiedPHSRule)) { - if (pstClassifierEntry->pstPhsRule) { - if (pstClassifierEntry->pstPhsRule->u8RefCnt) - pstClassifierEntry->pstPhsRule->u8RefCnt--; - - if (0 == pstClassifierEntry->pstPhsRule->u8RefCnt) - kfree(pstClassifierEntry->pstPhsRule); - } - memset(pstClassifierEntry, 0, - sizeof(struct bcm_phs_classifier_entry)); - } - - nClsidIndex = - GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable, - uiClsId, - eOldClassifierRuleContext, - &pstClassifierEntry); - - if ((nClsidIndex != PHS_INVALID_TABLE_INDEX) && - (!pstClassifierEntry->bUnclassifiedPHSRule)) { - kfree(pstClassifierEntry->pstPhsRule); - memset(pstClassifierEntry, 0, - sizeof(struct bcm_phs_classifier_entry)); - } - -out: - return 0; -} - -/* - * PhsDeleteSFRules - * - * Routine Description: - * Exported function to Delete a all PHS Rules for the SFID. - * - * Arguments: - * IN void* pvContext - PHS Driver Specific Context - * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rules need to be deleted - * - * Return Value: - * - * 0 if successful, - * >0 Error. 
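 * Every classifier entry of the flow is cleared from both the active and the
 * old rule lists, each attached PHS rule is freed once its reference count
 * drops to zero, and the service flow slot is finally marked unused.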
- */ -ULONG PhsDeleteSFRules(IN void *pvContext, IN B_UINT16 uiVcid) -{ - UINT nSFIndex = 0, nClsidIndex = 0; - struct bcm_phs_entry *pstServiceFlowEntry = NULL; - struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_phs_extension *pDeviceExtension = - (struct bcm_phs_extension *)pvContext; - struct bcm_phs_classifier_entry *curr_clsf_entry; - struct bcm_phs_classifier_entry *curr_rules_list; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "====>\n"); - - if (!pDeviceExtension) - goto out; - - /* Retrieve the SFID Entry Index for requested Service Flow */ - nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable, - uiVcid, &pstServiceFlowEntry); - if (nSFIndex == PHS_INVALID_TABLE_INDEX) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, "SFID Match Failed\n"); - return ERR_SF_MATCH_FAIL; - } - - pstClassifierRulesTable = pstServiceFlowEntry->pstClassifierTable; - if (pstClassifierRulesTable) { - for (nClsidIndex = 0; nClsidIndex < MAX_PHSRULE_PER_SF; nClsidIndex++) { - curr_clsf_entry = - &pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]; - - curr_rules_list = - &pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]; - - if (curr_clsf_entry->pstPhsRule) { - - if (curr_clsf_entry->pstPhsRule->u8RefCnt) - curr_clsf_entry->pstPhsRule->u8RefCnt--; - - if (0 == curr_clsf_entry->pstPhsRule->u8RefCnt) - kfree(curr_clsf_entry->pstPhsRule); - - curr_clsf_entry->pstPhsRule = NULL; - } - memset(curr_clsf_entry, 0, - sizeof(struct bcm_phs_classifier_entry)); - if (curr_rules_list->pstPhsRule) { - - if (curr_rules_list->pstPhsRule->u8RefCnt) - curr_rules_list->pstPhsRule->u8RefCnt--; - - if (0 == curr_rules_list->pstPhsRule->u8RefCnt) - kfree(curr_rules_list->pstPhsRule); - - curr_rules_list->pstPhsRule = NULL; - } - memset(curr_rules_list, 0, - sizeof(struct bcm_phs_classifier_entry)); - } - } - pstServiceFlowEntry->bUsed = false; - pstServiceFlowEntry->uiVcid = 0; - -out: - return 0; -} - -/* - * PhsCompress - * - * Routine Description: - * Exported function to compress the data using PHS. - * - * Arguments: - * IN void* pvContext - PHS Driver Specific Context. - * IN B_UINT16 uiVcid - The Service Flow ID to which current - * packet header compression applies. - * IN UINT uiClsId - The Classifier ID to which current packet - * header compression applies. - * IN void *pvInputBuffer - The Input buffer containg packet header - * data - * IN void *pvOutputBuffer - The output buffer returned by this - * function after PHS - * IN UINT *pOldHeaderSize - The actual size of the header before PHS - * IN UINT *pNewHeaderSize - The new size of the header after applying - * PHS - * - * Return Value: - * - * 0 if successful, - * >0 Error. 
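 * As implemented below, the routine returns STATUS_PHS_COMPRESSED when the
 * header was suppressed and STATUS_PHS_NOCOMPRESSION otherwise. Each set bit
 * in the rule's PHSM marks a header byte as suppressible (subject to PHSV
 * verification against the PHSF template); clear bits are copied through
 * unchanged as dynamic fields.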
- */ -static ULONG PhsCompress(IN void *pvContext, - IN B_UINT16 uiVcid, - IN B_UINT16 uiClsId, - IN void *pvInputBuffer, - OUT void *pvOutputBuffer, - OUT UINT *pOldHeaderSize, - OUT UINT *pNewHeaderSize) -{ - UINT nSFIndex = 0, nClsidIndex = 0; - struct bcm_phs_entry *pstServiceFlowEntry = NULL; - struct bcm_phs_classifier_entry *pstClassifierEntry = NULL; - struct bcm_phs_rule *pstPhsRule = NULL; - ULONG lStatus = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_phs_extension *pDeviceExtension = - (struct bcm_phs_extension *)pvContext; - - if (pDeviceExtension == NULL) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "Invalid Device Extension\n"); - lStatus = STATUS_PHS_NOCOMPRESSION; - return lStatus; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "Suppressing header\n"); - - /* Retrieve the SFID Entry Index for requested Service Flow */ - nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable, - uiVcid, &pstServiceFlowEntry); - if (nSFIndex == PHS_INVALID_TABLE_INDEX) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "SFID Match Failed\n"); - lStatus = STATUS_PHS_NOCOMPRESSION; - return lStatus; - } - - nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable, - uiClsId, eActiveClassifierRuleContext, - &pstClassifierEntry); - - if (nClsidIndex == PHS_INVALID_TABLE_INDEX) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "No PHS Rule Defined For Classifier\n"); - lStatus = STATUS_PHS_NOCOMPRESSION; - return lStatus; - } - - /* get rule from SF id,Cls ID pair and proceed */ - pstPhsRule = pstClassifierEntry->pstPhsRule; - if (!ValidatePHSRuleComplete(pstPhsRule)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "PHS Rule Defined For Classifier But Not Complete\n"); - lStatus = STATUS_PHS_NOCOMPRESSION; - return lStatus; - } - - /* Compress Packet */ - lStatus = phs_compress(pstPhsRule, - (PUCHAR)pvInputBuffer, - (PUCHAR)pvOutputBuffer, - pOldHeaderSize, - pNewHeaderSize); - - if (lStatus == STATUS_PHS_COMPRESSED) { - pstPhsRule->PHSModifiedBytes += - *pOldHeaderSize - *pNewHeaderSize - 1; - pstPhsRule->PHSModifiedNumPackets++; - } else { - pstPhsRule->PHSErrorNumPackets++; - } - - return lStatus; -} - -/* - * PhsDeCompress - * - * Routine Description: - * Exported function to restore the packet header in Rx path. - * - * Arguments: - * IN void* pvContext - PHS Driver Specific Context. - * IN B_UINT16 uiVcid - The Service Flow ID to which current - * packet header restoration applies. - * IN void *pvInputBuffer - The Input buffer containg suppressed - * packet header data - * OUT void *pvOutputBuffer - The output buffer returned by this - * function after restoration - * OUT UINT *pHeaderSize - The packet header size after restoration - * is returned in this parameter. - * - * Return Value: - * - * 0 if successful, - * >0 Error. 
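 * A leading PHSI byte of 0 (UNCOMPRESSED_PACKET) means the header was sent
 * unsuppressed, in which case STATUS_PHS_NOCOMPRESSION is returned and the
 * output buffer is left untouched.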
- */ -static ULONG PhsDeCompress(IN void *pvContext, - IN B_UINT16 uiVcid, - IN void *pvInputBuffer, - OUT void *pvOutputBuffer, - OUT UINT *pInHeaderSize, - OUT UINT *pOutHeaderSize) -{ - UINT nSFIndex = 0, nPhsRuleIndex = 0; - struct bcm_phs_entry *pstServiceFlowEntry = NULL; - struct bcm_phs_rule *pstPhsRule = NULL; - UINT phsi; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_phs_extension *pDeviceExtension = - (struct bcm_phs_extension *)pvContext; - - *pInHeaderSize = 0; - if (pDeviceExtension == NULL) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, - DBG_LVL_ALL, "Invalid Device Extension\n"); - return ERR_PHS_INVALID_DEVICE_EXETENSION; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, - "Restoring header\n"); - - phsi = *((unsigned char *)(pvInputBuffer)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, - "PHSI To Be Used For restore : %x\n", phsi); - if (phsi == UNCOMPRESSED_PACKET) - return STATUS_PHS_NOCOMPRESSION; - - /* Retrieve the SFID Entry Index for requested Service Flow */ - nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable, - uiVcid, &pstServiceFlowEntry); - if (nSFIndex == PHS_INVALID_TABLE_INDEX) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, - DBG_LVL_ALL, - "SFID Match Failed During Lookup\n"); - return ERR_SF_MATCH_FAIL; - } - - nPhsRuleIndex = GetPhsRuleEntry(pstServiceFlowEntry->pstClassifierTable, - phsi, - eActiveClassifierRuleContext, - &pstPhsRule); - if (nPhsRuleIndex == PHS_INVALID_TABLE_INDEX) { - /* Phs Rule does not exist in active rules table. Lets try - * in the old rules table. */ - nPhsRuleIndex = GetPhsRuleEntry(pstServiceFlowEntry->pstClassifierTable, - phsi, - eOldClassifierRuleContext, - &pstPhsRule); - if (nPhsRuleIndex == PHS_INVALID_TABLE_INDEX) - return ERR_PHSRULE_MATCH_FAIL; - } - - *pInHeaderSize = phs_decompress((PUCHAR)pvInputBuffer, - (PUCHAR)pvOutputBuffer, - pstPhsRule, - pOutHeaderSize); - - pstPhsRule->PHSModifiedBytes += *pOutHeaderSize - *pInHeaderSize - 1; - - pstPhsRule->PHSModifiedNumPackets++; - return STATUS_PHS_COMPRESSED; -} - -/* - * Procedure: free_phs_serviceflow_rules - * - * Description: This routine is responsible for freeing memory allocated for - * PHS rules. - * - * Arguments: - * rules - ptr to S_SERVICEFLOW_TABLE structure. - * - * Returns: - * Does not return any value. 
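 * PHS rules are reference counted, so an individual rule is released only
 * when its u8RefCnt drops to zero.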
- */ -static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable) -{ - int i, j; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - struct bcm_phs_classifier_entry *curr_act_rules_list; - struct bcm_phs_classifier_entry *curr_old_rules_list; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "=======>\n"); - - if (!psServiceFlowRulesTable) - goto out; - - for (i = 0; i < MAX_SERVICEFLOWS; i++) { - struct bcm_phs_entry stServiceFlowEntry = - psServiceFlowRulesTable->stSFList[i]; - struct bcm_phs_classifier_table *pstClassifierRulesTable = - stServiceFlowEntry.pstClassifierTable; - - if (pstClassifierRulesTable) { - for (j = 0; j < MAX_PHSRULE_PER_SF; j++) { - curr_act_rules_list = - &pstClassifierRulesTable->stActivePhsRulesList[j]; - - curr_old_rules_list = - &pstClassifierRulesTable->stOldPhsRulesList[j]; - - if (curr_act_rules_list->pstPhsRule) { - - if (curr_act_rules_list->pstPhsRule->u8RefCnt) - curr_act_rules_list->pstPhsRule->u8RefCnt--; - - if (0 == curr_act_rules_list->pstPhsRule->u8RefCnt) - kfree(curr_act_rules_list->pstPhsRule); - - curr_act_rules_list->pstPhsRule = NULL; - } - - if (curr_old_rules_list->pstPhsRule) { - - if (curr_old_rules_list->pstPhsRule->u8RefCnt) - curr_old_rules_list->pstPhsRule->u8RefCnt--; - - if (0 == curr_old_rules_list->pstPhsRule->u8RefCnt) - kfree(curr_old_rules_list->pstPhsRule); - - curr_old_rules_list->pstPhsRule = NULL; - } - } - kfree(pstClassifierRulesTable); - stServiceFlowEntry.pstClassifierTable = - pstClassifierRulesTable = NULL; - } - } - -out: - - kfree(psServiceFlowRulesTable); - psServiceFlowRulesTable = NULL; -} - -static bool ValidatePHSRuleComplete(IN const struct bcm_phs_rule *psPhsRule) -{ - return (psPhsRule && - psPhsRule->u8PHSI && - psPhsRule->u8PHSS && - psPhsRule->u8PHSFLength); -} - -UINT GetServiceFlowEntry(IN struct bcm_phs_table *psServiceFlowTable, - IN B_UINT16 uiVcid, - struct bcm_phs_entry **ppstServiceFlowEntry) -{ - int i; - struct bcm_phs_entry *curr_sf_list; - - for (i = 0; i < MAX_SERVICEFLOWS; i++) { - curr_sf_list = &psServiceFlowTable->stSFList[i]; - if (curr_sf_list->bUsed && (curr_sf_list->uiVcid == uiVcid)) { - *ppstServiceFlowEntry = curr_sf_list; - return i; - } - } - - *ppstServiceFlowEntry = NULL; - return PHS_INVALID_TABLE_INDEX; -} - -static UINT GetClassifierEntry(IN struct bcm_phs_classifier_table *pstClassifierTable, - IN B_UINT32 uiClsid, - enum bcm_phs_classifier_context eClsContext, - OUT struct bcm_phs_classifier_entry **ppstClassifierEntry) -{ - int i; - struct bcm_phs_classifier_entry *psClassifierRules = NULL; - - for (i = 0; i < MAX_PHSRULE_PER_SF; i++) { - - if (eClsContext == eActiveClassifierRuleContext) - psClassifierRules = - &pstClassifierTable->stActivePhsRulesList[i]; - else - psClassifierRules = - &pstClassifierTable->stOldPhsRulesList[i]; - - if (psClassifierRules->bUsed && - (psClassifierRules->uiClassifierRuleId == uiClsid)) { - *ppstClassifierEntry = psClassifierRules; - return i; - } - } - - *ppstClassifierEntry = NULL; - return PHS_INVALID_TABLE_INDEX; -} - -static UINT GetPhsRuleEntry(IN struct bcm_phs_classifier_table *pstClassifierTable, - IN B_UINT32 uiPHSI, - enum bcm_phs_classifier_context eClsContext, - OUT struct bcm_phs_rule **ppstPhsRule) -{ - int i; - struct bcm_phs_classifier_entry *pstClassifierRule = NULL; - - for (i = 0; i < MAX_PHSRULE_PER_SF; i++) { - if (eClsContext == eActiveClassifierRuleContext) - pstClassifierRule = - &pstClassifierTable->stActivePhsRulesList[i]; - else - 
pstClassifierRule = - &pstClassifierTable->stOldPhsRulesList[i]; - - if (pstClassifierRule->bUsed && - (pstClassifierRule->u8PHSI == uiPHSI)) { - *ppstPhsRule = pstClassifierRule->pstPhsRule; - return i; - } - } - - *ppstPhsRule = NULL; - return PHS_INVALID_TABLE_INDEX; -} - -static UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid, - IN B_UINT16 uiClsId, - IN struct bcm_phs_table *psServiceFlowTable, - struct bcm_phs_rule *psPhsRule, - B_UINT8 u8AssociatedPHSI) -{ - struct bcm_phs_classifier_table *psaClassifiertable = NULL; - UINT uiStatus = 0; - int iSfIndex; - bool bFreeEntryFound = false; - struct bcm_phs_entry *curr_list; - - /* Check for a free entry in SFID table */ - for (iSfIndex = 0; iSfIndex < MAX_SERVICEFLOWS; iSfIndex++) { - curr_list = &psServiceFlowTable->stSFList[iSfIndex]; - if (!curr_list->bUsed) { - bFreeEntryFound = TRUE; - break; - } - } - - if (!bFreeEntryFound) - return ERR_SFTABLE_FULL; - - psaClassifiertable = curr_list->pstClassifierTable; - uiStatus = CreateClassifierPHSRule(uiClsId, - psaClassifiertable, - psPhsRule, - eActiveClassifierRuleContext, - u8AssociatedPHSI); - if (uiStatus == PHS_SUCCESS) { - /* Add entry at free index to the SF */ - curr_list->bUsed = TRUE; - curr_list->uiVcid = uiVcid; - } - - return uiStatus; -} - -static UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid, - IN B_UINT16 uiClsId, - IN struct bcm_phs_entry *pstServiceFlowEntry, - struct bcm_phs_rule *psPhsRule, - B_UINT8 u8AssociatedPHSI) -{ - struct bcm_phs_classifier_entry *pstClassifierEntry = NULL; - UINT uiStatus = PHS_SUCCESS; - UINT nClassifierIndex = 0; - struct bcm_phs_classifier_table *psaClassifiertable = NULL; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - psaClassifiertable = pstServiceFlowEntry->pstClassifierTable; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "==>"); - - /* Check if the supplied Classifier already exists */ - nClassifierIndex = GetClassifierEntry( - pstServiceFlowEntry->pstClassifierTable, - uiClsId, - eActiveClassifierRuleContext, - &pstClassifierEntry); - - if (nClassifierIndex == PHS_INVALID_TABLE_INDEX) { - /* - * The Classifier doesn't exist. So its a new classifier being - * added. - * Add new entry to associate PHS Rule to the Classifier - */ - - uiStatus = CreateClassifierPHSRule(uiClsId, psaClassifiertable, - psPhsRule, - eActiveClassifierRuleContext, - u8AssociatedPHSI); - return uiStatus; - } - - /* - * The Classifier exists.The PHS Rule for this classifier - * is being modified - */ - - if (pstClassifierEntry->u8PHSI == psPhsRule->u8PHSI) { - if (pstClassifierEntry->pstPhsRule == NULL) - return ERR_PHS_INVALID_PHS_RULE; - - /* - * This rule already exists if any fields are changed for this - * PHS rule update them. 
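	 * Only fields supplied with a non-zero length or value overwrite the
	 * stored PHSF, PHSM and PHSS; PHSV is always taken from the new rule.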
- */ - /* If any part of PHSF is valid then we update PHSF */ - if (psPhsRule->u8PHSFLength) { - /* update PHSF */ - memcpy(pstClassifierEntry->pstPhsRule->u8PHSF, - psPhsRule->u8PHSF, - MAX_PHS_LENGTHS); - } - - if (psPhsRule->u8PHSFLength) { - /* update PHSFLen */ - pstClassifierEntry->pstPhsRule->u8PHSFLength = - psPhsRule->u8PHSFLength; - } - - if (psPhsRule->u8PHSMLength) { - /* update PHSM */ - memcpy(pstClassifierEntry->pstPhsRule->u8PHSM, - psPhsRule->u8PHSM, - MAX_PHS_LENGTHS); - } - - if (psPhsRule->u8PHSMLength) { - /* update PHSM Len */ - pstClassifierEntry->pstPhsRule->u8PHSMLength = - psPhsRule->u8PHSMLength; - } - - if (psPhsRule->u8PHSS) { - /* update PHSS */ - pstClassifierEntry->pstPhsRule->u8PHSS = - psPhsRule->u8PHSS; - } - - /* update PHSV */ - pstClassifierEntry->pstPhsRule->u8PHSV = psPhsRule->u8PHSV; - } else { - /* A new rule is being set for this classifier. */ - uiStatus = UpdateClassifierPHSRule(uiClsId, - pstClassifierEntry, - psaClassifiertable, - psPhsRule, - u8AssociatedPHSI); - } - - return uiStatus; -} - -static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId, - struct bcm_phs_classifier_table *psaClassifiertable, - struct bcm_phs_rule *psPhsRule, - enum bcm_phs_classifier_context eClsContext, - B_UINT8 u8AssociatedPHSI) -{ - UINT iClassifierIndex = 0; - bool bFreeEntryFound = false; - struct bcm_phs_classifier_entry *psClassifierRules = NULL; - UINT nStatus = PHS_SUCCESS; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, - "Inside CreateClassifierPHSRule"); - - if (psaClassifiertable == NULL) - return ERR_INVALID_CLASSIFIERTABLE_FOR_SF; - - if (eClsContext == eOldClassifierRuleContext) { - /* - * If An Old Entry for this classifier ID already exists in the - * old rules table replace it. - */ - - iClassifierIndex = GetClassifierEntry(psaClassifiertable, - uiClsId, - eClsContext, - &psClassifierRules); - - if (iClassifierIndex != PHS_INVALID_TABLE_INDEX) { - /* - * The Classifier already exists in the old rules table - * Lets replace the old classifier with the new one. 
- */ - bFreeEntryFound = TRUE; - } - } - - if (!bFreeEntryFound) { - /* Continue to search for a free location to add the rule */ - for (iClassifierIndex = 0; iClassifierIndex < - MAX_PHSRULE_PER_SF; iClassifierIndex++) { - if (eClsContext == eActiveClassifierRuleContext) - psClassifierRules = &psaClassifiertable->stActivePhsRulesList[iClassifierIndex]; - else - psClassifierRules = &psaClassifiertable->stOldPhsRulesList[iClassifierIndex]; - - if (!psClassifierRules->bUsed) { - bFreeEntryFound = TRUE; - break; - } - } - } - - if (!bFreeEntryFound) { - - if (eClsContext == eActiveClassifierRuleContext) - return ERR_CLSASSIFIER_TABLE_FULL; - /* Lets replace the oldest rule if we are looking in - * old Rule table */ - if (psaClassifiertable->uiOldestPhsRuleIndex >= MAX_PHSRULE_PER_SF) - psaClassifiertable->uiOldestPhsRuleIndex = 0; - - iClassifierIndex = psaClassifiertable->uiOldestPhsRuleIndex; - psClassifierRules = &psaClassifiertable->stOldPhsRulesList[iClassifierIndex]; - - psaClassifiertable->uiOldestPhsRuleIndex++; - } - - if (eClsContext == eOldClassifierRuleContext) { - - if (psClassifierRules->pstPhsRule == NULL) { - - psClassifierRules->pstPhsRule = - kmalloc(sizeof(struct bcm_phs_rule), - GFP_KERNEL); - - if (NULL == psClassifierRules->pstPhsRule) - return ERR_PHSRULE_MEMALLOC_FAIL; - } - - psClassifierRules->bUsed = TRUE; - psClassifierRules->uiClassifierRuleId = uiClsId; - psClassifierRules->u8PHSI = psPhsRule->u8PHSI; - psClassifierRules->bUnclassifiedPHSRule = - psPhsRule->bUnclassifiedPHSRule; - - /* Update The PHS rule */ - memcpy(psClassifierRules->pstPhsRule, psPhsRule, - sizeof(struct bcm_phs_rule)); - } else - nStatus = UpdateClassifierPHSRule(uiClsId, - psClassifierRules, - psaClassifiertable, - psPhsRule, - u8AssociatedPHSI); - - return nStatus; -} - -static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId, - IN struct bcm_phs_classifier_entry *pstClassifierEntry, - struct bcm_phs_classifier_table *psaClassifiertable, - struct bcm_phs_rule *psPhsRule, - B_UINT8 u8AssociatedPHSI) -{ - struct bcm_phs_rule *pstAddPhsRule = NULL; - UINT nPhsRuleIndex = 0; - bool bPHSRuleOrphaned = false; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - psPhsRule->u8RefCnt = 0; - - /* Step 1 Deref Any Exisiting PHS Rule in this classifier Entry */ - bPHSRuleOrphaned = DerefPhsRule(uiClsId, psaClassifiertable, - pstClassifierEntry->pstPhsRule); - - /* Step 2 Search if there is a PHS Rule with u8AssociatedPHSI in - * Classifier table for this SF */ - nPhsRuleIndex = GetPhsRuleEntry(psaClassifiertable, u8AssociatedPHSI, - eActiveClassifierRuleContext, - &pstAddPhsRule); - if (PHS_INVALID_TABLE_INDEX == nPhsRuleIndex) { - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, - "\nAdding New PHSRuleEntry For Classifier"); - - if (psPhsRule->u8PHSI == 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, "\nError PHSI is Zero\n"); - return ERR_PHS_INVALID_PHS_RULE; - } - - /* Step 2.a PHS Rule Does Not Exist .Create New PHS Rule for - * uiClsId */ - if (false == bPHSRuleOrphaned) { - - pstClassifierEntry->pstPhsRule = - kmalloc(sizeof(struct bcm_phs_rule), - GFP_KERNEL); - if (NULL == pstClassifierEntry->pstPhsRule) - return ERR_PHSRULE_MEMALLOC_FAIL; - } - memcpy(pstClassifierEntry->pstPhsRule, psPhsRule, - sizeof(struct bcm_phs_rule)); - } else { - /* Step 2.b PHS Rule Exists Tie uiClsId with the existing - * PHS Rule */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, - DBG_LVL_ALL, - "\nTying Classifier to Existing PHS 
Rule"); - if (bPHSRuleOrphaned) { - kfree(pstClassifierEntry->pstPhsRule); - pstClassifierEntry->pstPhsRule = NULL; - } - pstClassifierEntry->pstPhsRule = pstAddPhsRule; - } - - pstClassifierEntry->bUsed = TRUE; - pstClassifierEntry->u8PHSI = pstClassifierEntry->pstPhsRule->u8PHSI; - pstClassifierEntry->uiClassifierRuleId = uiClsId; - pstClassifierEntry->pstPhsRule->u8RefCnt++; - pstClassifierEntry->bUnclassifiedPHSRule = - pstClassifierEntry->pstPhsRule->bUnclassifiedPHSRule; - - return PHS_SUCCESS; -} - -static bool DerefPhsRule(IN B_UINT16 uiClsId, - struct bcm_phs_classifier_table *psaClassifiertable, - struct bcm_phs_rule *pstPhsRule) -{ - if (pstPhsRule == NULL) - return false; - - if (pstPhsRule->u8RefCnt) - pstPhsRule->u8RefCnt--; - - return (0 == pstPhsRule->u8RefCnt); -} - -static void dbg_print_st_cls_entry(struct bcm_mini_adapter *ad, - struct bcm_phs_entry *st_serv_flow_entry, - struct bcm_phs_classifier_entry *st_cls_entry) -{ - int k; - - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n VCID : %#X", st_serv_flow_entry->uiVcid); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n ClassifierID : %#X", st_cls_entry->uiClassifierRuleId); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSRuleID : %#X", st_cls_entry->u8PHSI); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n****************PHS Rule********************\n"); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSI : %#X", st_cls_entry->pstPhsRule->u8PHSI); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSFLength : %#X ", st_cls_entry->pstPhsRule->u8PHSFLength); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSF : "); - - for (k = 0 ; k < st_cls_entry->pstPhsRule->u8PHSFLength; k++) - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "%#X ", st_cls_entry->pstPhsRule->u8PHSF[k]); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSMLength : %#X", st_cls_entry->pstPhsRule->u8PHSMLength); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSM :"); - - for (k = 0; k < st_cls_entry->pstPhsRule->u8PHSMLength; k++) - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "%#X ", st_cls_entry->pstPhsRule->u8PHSM[k]); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSS : %#X ", st_cls_entry->pstPhsRule->u8PHSS); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSV : %#X", st_cls_entry->pstPhsRule->u8PHSV); - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n********************************************\n"); -} - -static void phsrules_per_sf_dbg_print(struct bcm_mini_adapter *ad, - struct bcm_phs_entry *st_serv_flow_entry) -{ - int j, l; - struct bcm_phs_classifier_entry st_cls_entry; - - for (j = 0; j < MAX_PHSRULE_PER_SF; j++) { - - for (l = 0; l < 2; l++) { - - if (l == 0) { - st_cls_entry = st_serv_flow_entry->pstClassifierTable->stActivePhsRulesList[j]; - if (st_cls_entry.bUsed) - BCM_DEBUG_PRINT(ad, - DBG_TYPE_OTHERS, - DUMP_INFO, - (DBG_LVL_ALL | DBG_NO_FUNC_PRINT), - "\n Active PHS Rule :\n"); - } else { - st_cls_entry = st_serv_flow_entry->pstClassifierTable->stOldPhsRulesList[j]; - if (st_cls_entry.bUsed) - BCM_DEBUG_PRINT(ad, - DBG_TYPE_OTHERS, - 
DUMP_INFO, - (DBG_LVL_ALL | DBG_NO_FUNC_PRINT), - "\n Old PHS Rule :\n"); - } - - if (st_cls_entry.bUsed) { - dbg_print_st_cls_entry(ad, - st_serv_flow_entry, - &st_cls_entry); - } - } - } -} - -void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension) -{ - int i; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, - "\n Dumping PHS Rules :\n"); - - for (i = 0; i < MAX_SERVICEFLOWS; i++) { - - struct bcm_phs_entry stServFlowEntry = - pDeviceExtension->pstServiceFlowPhsRulesTable->stSFList[i]; - - if (!stServFlowEntry.bUsed) - continue; - - phsrules_per_sf_dbg_print(Adapter, &stServFlowEntry); - } -} - -/* - * Procedure: phs_decompress - * - * Description: This routine restores the static fields within the packet. - * - * Arguments: - * in_buf - ptr to incoming packet buffer. - * out_buf - ptr to output buffer where the suppressed - * header is copied. - * decomp_phs_rules - ptr to PHS rule. - * header_size - ptr to field which holds the phss or - * phsf_length. - * - * Returns: - * size - The number of bytes of dynamic fields present with in the - * incoming packet header. - * 0 - If PHS rule is NULL.If PHSI is 0 indicateing packet as - * uncompressed. - */ -static int phs_decompress(unsigned char *in_buf, - unsigned char *out_buf, - struct bcm_phs_rule *decomp_phs_rules, - UINT *header_size) -{ - int phss, size = 0; - struct bcm_phs_rule *tmp_memb; - int bit, i = 0; - unsigned char *phsf, *phsm; - int in_buf_len = *header_size - 1; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - in_buf++; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, - "====>\n"); - *header_size = 0; - - if (decomp_phs_rules == NULL) - return 0; - - tmp_memb = decomp_phs_rules; - /* - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL, - * "\nDECOMP:In phs_decompress PHSI 1 %d",phsi)); - * header_size = tmp_memb->u8PHSFLength; - */ - phss = tmp_memb->u8PHSS; - phsf = tmp_memb->u8PHSF; - phsm = tmp_memb->u8PHSM; - - if (phss > MAX_PHS_LENGTHS) - phss = MAX_PHS_LENGTHS; - - /* - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL, - * "\nDECOMP: - * In phs_decompress PHSI %d phss %d index %d",phsi,phss,index)); - */ - while ((phss > 0) && (size < in_buf_len)) { - bit = ((*phsm << i) & SUPPRESS); - - if (bit == SUPPRESS) { - *out_buf = *phsf; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, - DBG_LVL_ALL, - "\nDECOMP:In phss %d phsf %d output %d", - phss, *phsf, *out_buf); - } else { - *out_buf = *in_buf; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, - DBG_LVL_ALL, - "\nDECOMP:In phss %d input %d output %d", - phss, *in_buf, *out_buf); - in_buf++; - size++; - } - out_buf++; - phsf++; - phss--; - i++; - *header_size = *header_size + 1; - - if (i > MAX_NO_BIT) { - i = 0; - phsm++; - } - } - - return size; -} - -/* - * Procedure: phs_compress - * - * Description: This routine suppresses the static fields within the packet. - * Before that it will verify the fields to be suppressed with the corresponding - * fields in the phsf. For verification it checks the phsv field of PHS rule. - * If set and verification succeeds it suppresses the field.If any one static - * field is found different none of the static fields are suppressed then the - * packet is sent as uncompressed packet with phsi=0. - * - * Arguments: - * phs_rule - ptr to PHS rule. - * in_buf - ptr to incoming packet buffer. 
- * out_buf - ptr to output buffer where the suppressed header is - * copied. - * header_size - ptr to field which holds the phss. - * - * Returns: - * size - The number of bytes copied into the output buffer i.e - * dynamic fields - * 0 - If PHS rule is NULL.If PHSV field is not set. If the - * verification fails. - */ -static int phs_compress(struct bcm_phs_rule *phs_rule, - unsigned char *in_buf, - unsigned char *out_buf, - UINT *header_size, - UINT *new_header_size) -{ - unsigned char *old_addr = out_buf; - int suppress = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (phs_rule == NULL) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "\nphs_compress(): phs_rule null!"); - *out_buf = ZERO_PHSI; - return STATUS_PHS_NOCOMPRESSION; - } - - if (phs_rule->u8PHSS <= *new_header_size) - *header_size = phs_rule->u8PHSS; - else - *header_size = *new_header_size; - - /* To copy PHSI */ - out_buf++; - suppress = verify_suppress_phsf(in_buf, out_buf, phs_rule->u8PHSF, - phs_rule->u8PHSM, phs_rule->u8PHSS, - phs_rule->u8PHSV, new_header_size); - - if (suppress == STATUS_PHS_COMPRESSED) { - *old_addr = (unsigned char)phs_rule->u8PHSI; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "\nCOMP:In phs_compress phsi %d", - phs_rule->u8PHSI); - } else { - *old_addr = ZERO_PHSI; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "\nCOMP:In phs_compress PHSV Verification failed"); - } - - return suppress; -} - -/* - * Procedure: verify_suppress_phsf - * - * Description: This routine verifies the fields of the packet and if all the - * static fields are equal it adds the phsi of that PHS rule.If any static - * field differs it woun't suppress any field. - * - * Arguments: - * rules_set - ptr to classifier_rules. - * in_buffer - ptr to incoming packet buffer. - * out_buffer - ptr to output buffer where the suppressed header is copied. - * phsf - ptr to phsf. - * phsm - ptr to phsm. - * phss - variable holding phss. - * - * Returns: - * size - The number of bytes copied into the output buffer i.e dynamic - * fields. - * 0 - Packet has failed the verification. 
- */ -static int verify_suppress_phsf(unsigned char *in_buffer, - unsigned char *out_buffer, - unsigned char *phsf, - unsigned char *phsm, - unsigned int phss, - unsigned int phsv, - UINT *new_header_size) -{ - unsigned int size = 0; - int bit, i = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "\nCOMP:In verify_phsf PHSM - 0x%X", *phsm); - - if (phss > (*new_header_size)) - phss = *new_header_size; - - while (phss > 0) { - bit = ((*phsm << i) & SUPPRESS); - if (bit == SUPPRESS) { - if (*in_buffer != *phsf) { - if (phsv == VERIFY) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - PHS_SEND, - DBG_LVL_ALL, - "\nCOMP:In verify_phsf failed for field %d buf %d phsf %d", - phss, - *in_buffer, - *phsf); - return STATUS_PHS_NOCOMPRESSION; - } - } else - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - PHS_SEND, - DBG_LVL_ALL, - "\nCOMP:In verify_phsf success for field %d buf %d phsf %d", - phss, - *in_buffer, - *phsf); - } else { - *out_buffer = *in_buffer; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - PHS_SEND, - DBG_LVL_ALL, - "\nCOMP:In copying_header input %d out %d", - *in_buffer, - *out_buffer); - out_buffer++; - size++; - } - - in_buffer++; - phsf++; - phss--; - i++; - - if (i > MAX_NO_BIT) { - i = 0; - phsm++; - } - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, - "\nCOMP:In verify_phsf success"); - *new_header_size = size; - return STATUS_PHS_COMPRESSED; -} diff --git a/drivers/staging/bcm/PHSModule.h b/drivers/staging/bcm/PHSModule.h deleted file mode 100644 index d84d60ba48f9..000000000000 --- a/drivers/staging/bcm/PHSModule.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef BCM_MINIPORT_PHSMODULE_H -#define BCM_MINIPORT_PHSMODULE_H - -int PHSTransmit(struct bcm_mini_adapter *Adapter, - struct sk_buff **pPacket, - USHORT Vcid, - B_UINT16 uiClassifierRuleID, - bool bHeaderSuppressionEnabled, - PUINT PacketLen, - UCHAR bEthCSSupport); - -int PHSReceive(struct bcm_mini_adapter *Adapter, - USHORT usVcid, - struct sk_buff *packet, - UINT *punPacketLen, - UCHAR *pucEthernetHdr, - UINT - ); - - -void DumpDataPacketHeader(PUCHAR pPkt); - -void DumpFullPacket(UCHAR *pBuf, UINT nPktLen); - -void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension); - - -int phs_init(struct bcm_phs_extension *pPhsdeviceExtension, - struct bcm_mini_adapter *Adapter); - -int PhsCleanup(struct bcm_phs_extension *pPHSDeviceExt); - -/* Utility Functions */ -ULONG PhsUpdateClassifierRule(void *pvContext, - B_UINT16 uiVcid, - B_UINT16 uiClsId, - struct bcm_phs_rule *psPhsRule, - B_UINT8 u8AssociatedPHSI); - -ULONG PhsDeletePHSRule(void *pvContext, B_UINT16 uiVcid, B_UINT8 u8PHSI); - -ULONG PhsDeleteClassifierRule(void *pvContext, - B_UINT16 uiVcid, - B_UINT16 uiClsId); - -ULONG PhsDeleteSFRules(void *pvContext, B_UINT16 uiVcid); - - -bool ValidatePHSRule(struct bcm_phs_rule *psPhsRule); - -UINT GetServiceFlowEntry(struct bcm_phs_table *psServiceFlowTable, - B_UINT16 uiVcid, - struct bcm_phs_entry **ppstServiceFlowEntry); - - -void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension); - - -#endif diff --git a/drivers/staging/bcm/Protocol.h b/drivers/staging/bcm/Protocol.h deleted file mode 100644 index 9818128d9320..000000000000 --- a/drivers/staging/bcm/Protocol.h +++ /dev/null @@ -1,128 +0,0 @@ -/************************************ -* Protocol.h -*************************************/ -#ifndef __PROTOCOL_H__ -#define __PROTOCOL_H__ - -#define IPV4 4 -#define IPV6 6 - -struct ArpHeader { - struct arphdr arp; 
- unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */ - unsigned char ar_sip[4]; /* sender IP address */ - unsigned char ar_tha[ETH_ALEN]; /* target hardware address */ - unsigned char ar_tip[4]; /* target IP address */ -}; - -struct bcm_transport_header { - union { - struct udphdr uhdr; - struct tcphdr thdr; - }; -} __packed; - -enum bcm_ip_frame_type { - eNonIPPacket, - eIPv4Packet, - eIPv6Packet -}; - -enum bcm_eth_frame_type { - eEthUnsupportedFrame, - eEth802LLCFrame, - eEth802LLCSNAPFrame, - eEth802QVLANFrame, - eEthOtherFrame -}; - -struct bcm_eth_packet_info { - enum bcm_ip_frame_type eNwpktIPFrameType; - enum bcm_eth_frame_type eNwpktEthFrameType; - unsigned short usEtherType; - unsigned char ucDSAP; -}; - -struct bcm_eth_q_frame { - struct bcm_eth_header EThHdr; - unsigned short UserPriority:3; - unsigned short CFI:1; - unsigned short VLANID:12; - unsigned short EthType; -} __packed; - -struct bcm_eth_llc_frame { - struct bcm_eth_header EThHdr; - unsigned char DSAP; - unsigned char SSAP; - unsigned char Control; -} __packed; - -struct bcm_eth_llc_snap_frame { - struct bcm_eth_header EThHdr; - unsigned char DSAP; - unsigned char SSAP; - unsigned char Control; - unsigned char OUI[3]; - unsigned short usEtherType; -} __packed; - -struct bcm_ethernet2_frame { - struct bcm_eth_header EThHdr; -} __packed; - -#define ETHERNET_FRAMETYPE_IPV4 ntohs(0x0800) -#define ETHERNET_FRAMETYPE_IPV6 ntohs(0x86dd) -#define ETHERNET_FRAMETYPE_802QVLAN ntohs(0x8100) - -/* Per SF CS Specification Encodings */ -enum bcm_spec_encoding { - eCSSpecUnspecified = 0, - eCSPacketIPV4, - eCSPacketIPV6, - eCS802_3PacketEthernet, - eCS802_1QPacketVLAN, - eCSPacketIPV4Over802_3Ethernet, - eCSPacketIPV6Over802_3Ethernet, - eCSPacketIPV4Over802_1QVLAN, - eCSPacketIPV6Over802_1QVLAN, - eCSPacketUnsupported -}; - -#define IP6_HEADER_LEN 40 -#define IP_VERSION(byte) (((byte&0xF0)>>4)) - -#define MAC_ADDRESS_SIZE 6 -#define ETH_AND_IP_HEADER_LEN (14 + 20) -#define L4_SRC_PORT_LEN 2 -#define L4_DEST_PORT_LEN 2 -#define CTRL_PKT_LEN (8 + ETH_AND_IP_HEADER_LEN) - -#define ETH_ARP_FRAME 0x806 -#define ETH_IPV4_FRAME 0x800 -#define ETH_IPV6_FRAME 0x86DD -#define UDP 0x11 -#define TCP 0x06 - -#define ARP_OP_REQUEST 0x01 -#define ARP_OP_REPLY 0x02 -#define ARP_PKT_SIZE 60 - -/* This is the format for the TCP packet header */ -struct bcm_tcp_header { - unsigned short usSrcPort; - unsigned short usDestPort; - unsigned long ulSeqNumber; - unsigned long ulAckNumber; - unsigned char HeaderLength; - unsigned char ucFlags; - unsigned short usWindowsSize; - unsigned short usChkSum; - unsigned short usUrgetPtr; -}; - -#define TCP_HEADER_LEN sizeof(struct bcm_tcp_header) -#define TCP_ACK 0x10 /* Bit 4 in tcpflags field. 
*/ -#define GET_TCP_HEADER_LEN(byte) ((byte&0xF0)>>4) - -#endif /* __PROTOCOL_H__ */ diff --git a/drivers/staging/bcm/Prototypes.h b/drivers/staging/bcm/Prototypes.h deleted file mode 100644 index 1ddc8b2539f6..000000000000 --- a/drivers/staging/bcm/Prototypes.h +++ /dev/null @@ -1,217 +0,0 @@ -#ifndef _PROTOTYPES_H_ -#define _PROTOTYPES_H_ - -VOID LinkControlResponseMessage(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer); - -VOID StatisticsResponse(struct bcm_mini_adapter *Adapter, PVOID pvBuffer); - -VOID IdleModeResponse(struct bcm_mini_adapter *Adapter, PUINT puiBuffer); - -int control_packet_handler(struct bcm_mini_adapter *Adapter); - -VOID DeleteAllClassifiersForSF(struct bcm_mini_adapter *Adapter, UINT uiSearchRuleIndex); - -VOID flush_all_queues(struct bcm_mini_adapter *Adapter); - -int register_control_device_interface(struct bcm_mini_adapter *ps_adapter); - -void unregister_control_device_interface(struct bcm_mini_adapter *Adapter); - -INT CopyBufferToControlPacket(struct bcm_mini_adapter *Adapter,/**ucIPSourceAddressLength) - return TRUE; - for (ucLoopIndex = 0; - ucLoopIndex < (pstClassifierRule->ucIPSourceAddressLength); - ucLoopIndex++) { - src_addr = &pstClassifierRule->stSrcIpAddress; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Src Ip Address Mask:0x%x PacketIp:0x%x and Classification:0x%x", - (UINT)src_addr->ulIpv4Mask[ucLoopIndex], - (UINT)ulSrcIP, - (UINT)src_addr->ulIpv6Addr[ucLoopIndex]); - - if ((src_addr->ulIpv4Mask[ucLoopIndex] & ulSrcIP) == - (src_addr->ulIpv4Addr[ucLoopIndex] & - src_addr->ulIpv4Mask[ucLoopIndex])) - return TRUE; - } - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Src Ip Address Not Matched"); - return false; -} - - -/******************************************************************* -* Function - MatchDestIpAddress() -* -* Description - Checks whether the Destination IP address from the packet -* matches with that of Queue. -* -* Parameters - pstClassifierRule: Pointer to the packet info structure. -* - ulDestIP : Destination IP address from the packet. -* -* Returns - TRUE(If address matches) else FAIL . -*********************************************************************/ -static bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP) -{ - UCHAR ucLoopIndex = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - union u_ip_address *dest_addr = &pstClassifierRule->stDestIpAddress; - - ulDestIP = ntohl(ulDestIP); - if (0 == pstClassifierRule->ucIPDestinationAddressLength) - return TRUE; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Destination Ip Address 0x%x 0x%x 0x%x ", - (UINT)ulDestIP, - (UINT)dest_addr->ulIpv4Mask[ucLoopIndex], - (UINT)dest_addr->ulIpv4Addr[ucLoopIndex]); - - for (ucLoopIndex = 0; - ucLoopIndex < (pstClassifierRule->ucIPDestinationAddressLength); - ucLoopIndex++) { - if ((dest_addr->ulIpv4Mask[ucLoopIndex] & ulDestIP) == - (dest_addr->ulIpv4Addr[ucLoopIndex] & - dest_addr->ulIpv4Mask[ucLoopIndex])) - return TRUE; - } - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Destination Ip Address Not Matched"); - return false; -} - - -/************************************************************************ -* Function - MatchTos() -* -* Description - Checks the TOS from the packet matches with that of queue. -* -* Parameters - pstClassifierRule : Pointer to the packet info structure. -* - ucTypeOfService: TOS from the packet. -* -* Returns - TRUE(If address matches) else FAIL. 
-**************************************************************************/ -static bool MatchTos(struct bcm_classifier_rule *pstClassifierRule, - UCHAR ucTypeOfService) -{ - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (3 != pstClassifierRule->ucIPTypeOfServiceLength) - return TRUE; - - if (((pstClassifierRule->ucTosMask & ucTypeOfService) <= - pstClassifierRule->ucTosHigh) && - ((pstClassifierRule->ucTosMask & ucTypeOfService) >= - pstClassifierRule->ucTosLow)) - return TRUE; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Type Of Service Not Matched"); - return false; -} - - -/*************************************************************************** -* Function - MatchProtocol() -* -* Description - Checks the protocol from the packet matches with that of queue. -* -* Parameters - pstClassifierRule: Pointer to the packet info structure. -* - ucProtocol : Protocol from the packet. -* -* Returns - TRUE(If address matches) else FAIL. -****************************************************************************/ -bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, - UCHAR ucProtocol) -{ - UCHAR ucLoopIndex = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (0 == pstClassifierRule->ucProtocolLength) - return TRUE; - for (ucLoopIndex = 0; - ucLoopIndex < pstClassifierRule->ucProtocolLength; - ucLoopIndex++) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Protocol:0x%X Classification Protocol:0x%X", - ucProtocol, - pstClassifierRule->ucProtocol[ucLoopIndex]); - if (pstClassifierRule->ucProtocol[ucLoopIndex] == ucProtocol) - return TRUE; - } - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Protocol Not Matched"); - return false; -} - - -/*********************************************************************** -* Function - MatchSrcPort() -* -* Description - Checks, Source port from the packet matches with that of queue. -* -* Parameters - pstClassifierRule: Pointer to the packet info structure. -* - ushSrcPort : Source port from the packet. -* -* Returns - TRUE(If address matches) else FAIL. -***************************************************************************/ -bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, - USHORT ushSrcPort) -{ - UCHAR ucLoopIndex = 0; - - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - - if (0 == pstClassifierRule->ucSrcPortRangeLength) - return TRUE; - for (ucLoopIndex = 0; - ucLoopIndex < pstClassifierRule->ucSrcPortRangeLength; - ucLoopIndex++) { - if (ushSrcPort <= pstClassifierRule->usSrcPortRangeHi[ucLoopIndex] && - ushSrcPort >= pstClassifierRule->usSrcPortRangeLo[ucLoopIndex]) - return TRUE; - } - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Src Port: %x Not Matched ", - ushSrcPort); - return false; -} - - -/*********************************************************************** -* Function - MatchDestPort() -* -* Description - Checks, Destination port from packet matches with that of queue. -* -* Parameters - pstClassifierRule: Pointer to the packet info structure. -* - ushDestPort : Destination port from the packet. -* -* Returns - TRUE(If address matches) else FAIL. 
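The IPv4 source and destination address checks above reduce to one masked comparison per configured entry, with an empty rule acting as a wildcard. A condensed standalone version, assuming host byte order and illustrative names rather than the driver's structures:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True if addr falls in any of the rule's (addr, mask) pairs,
     * or if the rule lists no addresses at all (wildcard).
     */
    static bool masked_addr_match(uint32_t addr,
                                  const uint32_t *rule_addr,
                                  const uint32_t *rule_mask,
                                  unsigned int count)
    {
        if (count == 0)
            return true;    /* empty rule matches everything */

        for (unsigned int i = 0; i < count; i++)
            if ((addr & rule_mask[i]) == (rule_addr[i] & rule_mask[i]))
                return true;

        return false;
    }

The port helpers below are the analogous scan, testing "low <= port <= high" against each entry of the rule's range table.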
-***************************************************************************/ -bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, - USHORT ushDestPort) -{ - UCHAR ucLoopIndex = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (0 == pstClassifierRule->ucDestPortRangeLength) - return TRUE; - - for (ucLoopIndex = 0; - ucLoopIndex < pstClassifierRule->ucDestPortRangeLength; - ucLoopIndex++) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Matching Port:0x%X 0x%X 0x%X", - ushDestPort, - pstClassifierRule->usDestPortRangeLo[ucLoopIndex], - pstClassifierRule->usDestPortRangeHi[ucLoopIndex]); - - if (ushDestPort <= pstClassifierRule->usDestPortRangeHi[ucLoopIndex] && - ushDestPort >= pstClassifierRule->usDestPortRangeLo[ucLoopIndex]) - return TRUE; - } - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Dest Port: %x Not Matched", - ushDestPort); - return false; -} -/** - * @ingroup tx_functions - * Compares IPV4 Ip address and port number - * @return Queue Index. -*/ -static USHORT IpVersion4(struct bcm_mini_adapter *Adapter, - struct iphdr *iphd, - struct bcm_classifier_rule *pstClassifierRule) -{ - struct bcm_transport_header *xprt_hdr = NULL; - bool bClassificationSucceed = false; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "========>"); - - xprt_hdr = (struct bcm_transport_header *)((PUCHAR)iphd + sizeof(struct iphdr)); - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Trying to see Direction = %d %d", - pstClassifierRule->ucDirection, - pstClassifierRule->usVCID_Value); - - /* Checking classifier validity */ - if (!pstClassifierRule->bUsed || - pstClassifierRule->ucDirection == DOWNLINK_DIR) - goto out; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "is IPv6 check!"); - if (pstClassifierRule->bIpv6Protocol) - goto out; - - /* Checking IP header parameter */ - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Trying to match Source IP Address"); - if (!MatchSrcIpAddress(pstClassifierRule, iphd->saddr)) - goto out; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Source IP Address Matched"); - - if (!MatchDestIpAddress(pstClassifierRule, iphd->daddr)) - goto out; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Destination IP Address Matched"); - - if (!MatchTos(pstClassifierRule, iphd->tos)) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "TOS Match failed\n"); - goto out; - } - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "TOS Matched"); - - if (!MatchProtocol(pstClassifierRule, iphd->protocol)) - goto out; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Protocol Matched"); - - /* - * if protocol is not TCP or UDP then no - * need of comparing source port and destination port - */ - if (iphd->protocol != TCP && iphd->protocol != UDP) { - bClassificationSucceed = TRUE; - goto out; - } - /* Checking Transport Layer Header field if present */ - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Source Port %04x", - (iphd->protocol == UDP) ? xprt_hdr->uhdr.source : xprt_hdr->thdr.source); - - if (!MatchSrcPort(pstClassifierRule, - ntohs((iphd->protocol == UDP) ? 
- xprt_hdr->uhdr.source : xprt_hdr->thdr.source))) - goto out; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Src Port Matched"); - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Destination Port %04x", - (iphd->protocol == UDP) ? xprt_hdr->uhdr.dest : - xprt_hdr->thdr.dest); - - if (!MatchDestPort(pstClassifierRule, - ntohs((iphd->protocol == UDP) ? - xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest))) - goto out; - bClassificationSucceed = TRUE; - -out: - if (TRUE == bClassificationSucceed) { - INT iMatchedSFQueueIndex = 0; - - iMatchedSFQueueIndex = - SearchSfid(Adapter, pstClassifierRule->ulSFID); - if (iMatchedSFQueueIndex >= NO_OF_QUEUES) - bClassificationSucceed = false; - else if (false == Adapter->PackInfo[iMatchedSFQueueIndex].bActive) - bClassificationSucceed = false; - } - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "IpVersion4 <=========="); - - return bClassificationSucceed; -} - -VOID PruneQueueAllSF(struct bcm_mini_adapter *Adapter) -{ - UINT iIndex = 0; - - for (iIndex = 0; iIndex < HiPriority; iIndex++) { - if (!Adapter->PackInfo[iIndex].bValid) - continue; - - PruneQueue(Adapter, iIndex); - } -} - - -/** - * @ingroup tx_functions - * This function checks if the max queue size for a queue - * is less than number of bytes in the queue. If so - - * drops packets from the Head till the number of bytes is - * less than or equal to max queue size for the queue. - */ -static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex) -{ - struct sk_buff *PacketToDrop = NULL; - struct net_device_stats *netstats; - struct bcm_packet_info *curr_pack_info = &Adapter->PackInfo[iIndex]; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - PRUNE_QUEUE, - DBG_LVL_ALL, - "=====> Index %d", - iIndex); - - if (iIndex == HiPriority) - return; - - if (!Adapter || (iIndex < 0) || (iIndex > HiPriority)) - return; - - /* To Store the netdevice statistic */ - netstats = &Adapter->dev->stats; - - spin_lock_bh(&curr_pack_info->SFQueueLock); - - while (1) { -/* while((UINT)curr_pack_info->uiCurrentPacketsOnHost > - SF_MAX_ALLOWED_PACKETS_TO_BACKUP) { */ - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - PRUNE_QUEUE, - DBG_LVL_ALL, - "uiCurrentBytesOnHost:%x uiMaxBucketSize :%x", - curr_pack_info->uiCurrentBytesOnHost, - curr_pack_info->uiMaxBucketSize); - - PacketToDrop = curr_pack_info->FirstTxQueue; - - if (PacketToDrop == NULL) - break; - if ((curr_pack_info->uiCurrentPacketsOnHost < - SF_MAX_ALLOWED_PACKETS_TO_BACKUP) && - ((1000*(jiffies - *((B_UINT32 *)(PacketToDrop->cb) + - SKB_CB_LATENCY_OFFSET))/HZ) <= - curr_pack_info->uiMaxLatency)) - break; - - if (PacketToDrop) { - if (netif_msg_tx_err(Adapter)) - pr_info(PFX "%s: tx queue %d overlimit\n", - Adapter->dev->name, iIndex); - - netstats->tx_dropped++; - - DEQUEUEPACKET(curr_pack_info->FirstTxQueue, - curr_pack_info->LastTxQueue); - /* update current bytes and packets count */ - curr_pack_info->uiCurrentBytesOnHost -= - PacketToDrop->len; - curr_pack_info->uiCurrentPacketsOnHost--; - /* update dropped bytes and packets counts */ - curr_pack_info->uiDroppedCountBytes += PacketToDrop->len; - curr_pack_info->uiDroppedCountPackets++; - dev_kfree_skb(PacketToDrop); - - } - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - PRUNE_QUEUE, - DBG_LVL_ALL, - "Dropped Bytes:%x Dropped Packets:%x", - curr_pack_info->uiDroppedCountBytes, - curr_pack_info->uiDroppedCountPackets); - - atomic_dec(&Adapter->TotalPacketCount); - } - - spin_unlock_bh(&curr_pack_info->SFQueueLock); - - 
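Stripped of locking, statistics and debug output, PruneQueue implements the head-drop policy described in its comment: keep removing the oldest packet while the per-SF packet budget is exceeded or the head packet has outlived its allowed latency. A generic sketch of that loop, using hypothetical structures rather than the driver's types:

    #include <stdbool.h>
    #include <stddef.h>

    struct pkt {
        struct pkt *next;
        size_t len;
    };

    struct sf_queue {
        struct pkt *head, *tail;
        size_t bytes, pkts;
    };

    /*
     * Drop from the head while the caller-supplied policy says the head
     * packet may not stay (over the packet budget, or too old).  The
     * drop() callback stands in for dev_kfree_skb() plus the statistics
     * updates; returns the number of packets removed.
     */
    static unsigned int prune_queue_sketch(struct sf_queue *q,
                                           bool (*keep)(const struct sf_queue *q,
                                                        const struct pkt *head),
                                           void (*drop)(struct pkt *victim))
    {
        unsigned int dropped = 0;

        while (q->head && !keep(q, q->head)) {
            struct pkt *victim = q->head;

            q->head = victim->next;
            if (!q->head)
                q->tail = NULL;
            q->bytes -= victim->len;
            q->pkts--;
            drop(victim);
            dropped++;
        }
        return dropped;
    }

flush_all_queues below is the degenerate case of the same loop, where keep() always says no and every queued packet is dropped.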
BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - PRUNE_QUEUE, - DBG_LVL_ALL, - "TotalPacketCount:%x", - atomic_read(&Adapter->TotalPacketCount)); - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - PRUNE_QUEUE, - DBG_LVL_ALL, - "<====="); -} - -VOID flush_all_queues(struct bcm_mini_adapter *Adapter) -{ - INT iQIndex; - UINT uiTotalPacketLength; - struct sk_buff *PacketToDrop = NULL; - struct bcm_packet_info *curr_packet_info; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - DUMP_INFO, - DBG_LVL_ALL, - "=====>"); - - /* down(&Adapter->data_packet_queue_lock); */ - for (iQIndex = LowPriority; iQIndex < HiPriority; iQIndex++) { - struct net_device_stats *netstats = &Adapter->dev->stats; - - curr_packet_info = &Adapter->PackInfo[iQIndex]; - - spin_lock_bh(&curr_packet_info->SFQueueLock); - while (curr_packet_info->FirstTxQueue) { - PacketToDrop = curr_packet_info->FirstTxQueue; - if (PacketToDrop) { - uiTotalPacketLength = PacketToDrop->len; - netstats->tx_dropped++; - } else - uiTotalPacketLength = 0; - - DEQUEUEPACKET(curr_packet_info->FirstTxQueue, - curr_packet_info->LastTxQueue); - - /* Free the skb */ - dev_kfree_skb(PacketToDrop); - - /* update current bytes and packets count */ - curr_packet_info->uiCurrentBytesOnHost -= uiTotalPacketLength; - curr_packet_info->uiCurrentPacketsOnHost--; - - /* update dropped bytes and packets counts */ - curr_packet_info->uiDroppedCountBytes += uiTotalPacketLength; - curr_packet_info->uiDroppedCountPackets++; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - DUMP_INFO, - DBG_LVL_ALL, - "Dropped Bytes:%x Dropped Packets:%x", - curr_packet_info->uiDroppedCountBytes, - curr_packet_info->uiDroppedCountPackets); - atomic_dec(&Adapter->TotalPacketCount); - } - spin_unlock_bh(&curr_packet_info->SFQueueLock); - } - /* up(&Adapter->data_packet_queue_lock); */ - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_OTHERS, - DUMP_INFO, - DBG_LVL_ALL, - "<====="); -} - -USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff *skb) -{ - INT uiLoopIndex = 0; - struct bcm_classifier_rule *pstClassifierRule = NULL; - struct bcm_eth_packet_info stEthCsPktInfo; - PVOID pvEThPayload = NULL; - struct iphdr *pIpHeader = NULL; - INT uiSfIndex = 0; - USHORT usIndex = Adapter->usBestEffortQueueIndex; - bool bFragmentedPkt = false, bClassificationSucceed = false; - USHORT usCurrFragment = 0; - - struct bcm_tcp_header *pTcpHeader; - UCHAR IpHeaderLength; - UCHAR TcpHeaderLength; - - pvEThPayload = skb->data; - *((UINT32 *) (skb->cb) + SKB_CB_TCPACK_OFFSET) = 0; - EThCSGetPktInfo(Adapter, pvEThPayload, &stEthCsPktInfo); - - switch (stEthCsPktInfo.eNwpktEthFrameType) { - case eEth802LLCFrame: - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ClassifyPacket : 802LLCFrame\n"); - pIpHeader = pvEThPayload + sizeof(struct bcm_eth_llc_frame); - break; - case eEth802LLCSNAPFrame: - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ClassifyPacket : 802LLC SNAP Frame\n"); - pIpHeader = pvEThPayload + - sizeof(struct bcm_eth_llc_snap_frame); - break; - case eEth802QVLANFrame: - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ClassifyPacket : 802.1Q VLANFrame\n"); - pIpHeader = pvEThPayload + sizeof(struct bcm_eth_q_frame); - break; - case eEthOtherFrame: - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ClassifyPacket : ETH Other Frame\n"); - pIpHeader = pvEThPayload + sizeof(struct bcm_ethernet2_frame); - break; - default: - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ClassifyPacket : 
Unrecognized ETH Frame\n"); - pIpHeader = pvEThPayload + sizeof(struct bcm_ethernet2_frame); - break; - } - - if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet) { - usCurrFragment = (ntohs(pIpHeader->frag_off) & IP_OFFSET); - if ((ntohs(pIpHeader->frag_off) & IP_MF) || usCurrFragment) - bFragmentedPkt = TRUE; - - if (bFragmentedPkt) { - /* Fragmented Packet. Get Frag Classifier Entry. */ - pstClassifierRule = GetFragIPClsEntry(Adapter, - pIpHeader->id, - pIpHeader->saddr); - if (pstClassifierRule) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "It is next Fragmented pkt"); - bClassificationSucceed = TRUE; - } - if (!(ntohs(pIpHeader->frag_off) & IP_MF)) { - /* Fragmented Last packet . Remove Frag Classifier Entry */ - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "This is the last fragmented Pkt"); - DelFragIPClsEntry(Adapter, - pIpHeader->id, - pIpHeader->saddr); - } - } - } - - for (uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--) { - if (bClassificationSucceed) - break; - /* - * Iterate through all classifiers which are already in order of priority - * to classify the packet until match found - */ - if (false == Adapter->astClassifierTable[uiLoopIndex].bUsed) { - bClassificationSucceed = false; - continue; - } - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Adapter->PackInfo[%d].bvalid=True\n", - uiLoopIndex); - - if (0 == Adapter->astClassifierTable[uiLoopIndex].ucDirection) { - bClassificationSucceed = false; /* cannot be processed for classification. */ - continue; /* it is a down link connection */ - } - - pstClassifierRule = &Adapter->astClassifierTable[uiLoopIndex]; - - uiSfIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID); - if (uiSfIndex >= NO_OF_QUEUES) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Queue Not Valid. SearchSfid for this classifier Failed\n"); - continue; - } - - if (Adapter->PackInfo[uiSfIndex].bEthCSSupport) { - - if (eEthUnsupportedFrame == stEthCsPktInfo.eNwpktEthFrameType) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - " ClassifyPacket : Packet Not a Valid Supported Ethernet Frame\n"); - bClassificationSucceed = false; - continue; - } - - - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Performing ETH CS Classification on Classifier Rule ID : %x Service Flow ID : %lx\n", - pstClassifierRule->uiClassifierRuleIndex, - Adapter->PackInfo[uiSfIndex].ulSFID); - bClassificationSucceed = EThCSClassifyPkt(Adapter, - skb, - &stEthCsPktInfo, - pstClassifierRule, - Adapter->PackInfo[uiSfIndex].bEthCSSupport); - - if (!bClassificationSucceed) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ClassifyPacket : Ethernet CS Classification Failed\n"); - continue; - } - } else { /* No ETH Supported on this SF */ - if (eEthOtherFrame != stEthCsPktInfo.eNwpktEthFrameType) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - " ClassifyPacket : Packet Not a 802.3 Ethernet Frame... 
hence not allowed over non-ETH CS SF\n"); - bClassificationSucceed = false; - continue; - } - } - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Proceeding to IP CS Clasification"); - - if (Adapter->PackInfo[uiSfIndex].bIPCSSupport) { - - if (stEthCsPktInfo.eNwpktIPFrameType == eNonIPPacket) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - " ClassifyPacket : Packet is Not an IP Packet\n"); - bClassificationSucceed = false; - continue; - } - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "Dump IP Header :\n"); - DumpFullPacket((PUCHAR)pIpHeader, 20); - - if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet) - bClassificationSucceed = IpVersion4(Adapter, - pIpHeader, - pstClassifierRule); - else if (stEthCsPktInfo.eNwpktIPFrameType == eIPv6Packet) - bClassificationSucceed = IpVersion6(Adapter, - pIpHeader, - pstClassifierRule); - } - } - - if (bClassificationSucceed == TRUE) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "CF id : %d, SF ID is =%lu", - pstClassifierRule->uiClassifierRuleIndex, - pstClassifierRule->ulSFID); - - /* Store The matched Classifier in SKB */ - *((UINT32 *)(skb->cb)+SKB_CB_CLASSIFICATION_OFFSET) = - pstClassifierRule->uiClassifierRuleIndex; - if ((TCP == pIpHeader->protocol) && !bFragmentedPkt && - (ETH_AND_IP_HEADER_LEN + TCP_HEADER_LEN <= - skb->len)) { - IpHeaderLength = pIpHeader->ihl; - pTcpHeader = - (struct bcm_tcp_header *)(((PUCHAR)pIpHeader) + - (IpHeaderLength*4)); - TcpHeaderLength = GET_TCP_HEADER_LEN(pTcpHeader->HeaderLength); - - if ((pTcpHeader->ucFlags & TCP_ACK) && - (ntohs(pIpHeader->tot_len) == - (IpHeaderLength*4)+(TcpHeaderLength*4))) - *((UINT32 *) (skb->cb) + SKB_CB_TCPACK_OFFSET) = - TCP_ACK; - } - - usIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID); - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "index is =%d", - usIndex); - - /* - * If this is the first fragment of a Fragmented pkt, - * add this CF. Only This CF should be used for all other - * fragment of this Pkt. - */ - if (bFragmentedPkt && (usCurrFragment == 0)) { - /* - * First Fragment of Fragmented Packet. - * Create Frag CLS Entry - */ - struct bcm_fragmented_packet_info stFragPktInfo; - - stFragPktInfo.bUsed = TRUE; - stFragPktInfo.ulSrcIpAddress = pIpHeader->saddr; - stFragPktInfo.usIpIdentification = pIpHeader->id; - stFragPktInfo.pstMatchedClassifierEntry = - pstClassifierRule; - stFragPktInfo.bOutOfOrderFragment = false; - AddFragIPClsEntry(Adapter, &stFragPktInfo); - } - - - } - - return bClassificationSucceed ? 
usIndex : INVALID_QUEUE_INDEX; -} - -static bool EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule, - PUCHAR Mac) -{ - UINT i = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (pstClassifierRule->ucEthCSSrcMACLen == 0) - return TRUE; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "%s\n", __func__); - for (i = 0; i < MAC_ADDRESS_SIZE; i++) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n", - i, - Mac[i], - pstClassifierRule->au8EThCSSrcMAC[i], - pstClassifierRule->au8EThCSSrcMACMask[i]); - if ((pstClassifierRule->au8EThCSSrcMAC[i] & - pstClassifierRule->au8EThCSSrcMACMask[i]) != - (Mac[i] & pstClassifierRule->au8EThCSSrcMACMask[i])) - return false; - } - return TRUE; -} - -static bool EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule, - PUCHAR Mac) -{ - UINT i = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if (pstClassifierRule->ucEthCSDestMACLen == 0) - return TRUE; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "%s\n", - __func__); - for (i = 0; i < MAC_ADDRESS_SIZE; i++) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n", - i, - Mac[i], - pstClassifierRule->au8EThCSDestMAC[i], - pstClassifierRule->au8EThCSDestMACMask[i]); - if ((pstClassifierRule->au8EThCSDestMAC[i] & - pstClassifierRule->au8EThCSDestMACMask[i]) != - (Mac[i] & pstClassifierRule->au8EThCSDestMACMask[i])) - return false; - } - return TRUE; -} - -static bool EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule, - struct sk_buff *skb, - struct bcm_eth_packet_info *pstEthCsPktInfo) -{ - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - if ((pstClassifierRule->ucEtherTypeLen == 0) || - (pstClassifierRule->au8EthCSEtherType[0] == 0)) - return TRUE; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "%s SrcEtherType:%x CLS EtherType[0]:%x\n", - __func__, - pstEthCsPktInfo->usEtherType, - pstClassifierRule->au8EthCSEtherType[0]); - if (pstClassifierRule->au8EthCSEtherType[0] == 1) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "%s CLS EtherType[1]:%x EtherType[2]:%x\n", - __func__, - pstClassifierRule->au8EthCSEtherType[1], - pstClassifierRule->au8EthCSEtherType[2]); - - if (memcmp(&pstEthCsPktInfo->usEtherType, - &pstClassifierRule->au8EthCSEtherType[1], - 2) == 0) - return TRUE; - else - return false; - } - - if (pstClassifierRule->au8EthCSEtherType[0] == 2) { - if (eEth802LLCFrame != pstEthCsPktInfo->eNwpktEthFrameType) - return false; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "%s EthCS DSAP:%x EtherType[2]:%x\n", - __func__, - pstEthCsPktInfo->ucDSAP, - pstClassifierRule->au8EthCSEtherType[2]); - if (pstEthCsPktInfo->ucDSAP == - pstClassifierRule->au8EthCSEtherType[2]) - return TRUE; - return false; - - } - - return false; - -} - -static bool EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule, - struct sk_buff *skb, - struct bcm_eth_packet_info *pstEthCsPktInfo) -{ - bool bClassificationSucceed = false; - USHORT usVLANID; - B_UINT8 uPriority = 0; - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "%s CLS UserPrio:%x CLS VLANID:%x\n", - __func__, - ntohs(*((USHORT *)pstClassifierRule->usUserPriority)), - 
pstClassifierRule->usVLANID); - - /* - * In case FW didn't receive the TLV, - * the priority field should be ignored - */ - if (pstClassifierRule->usValidityBitMap & - (1<eNwpktEthFrameType != eEth802QVLANFrame) - return false; - - uPriority = (ntohs(*(USHORT *)(skb->data + - sizeof(struct bcm_eth_header))) & - 0xF000) >> 13; - - if ((uPriority >= pstClassifierRule->usUserPriority[0]) && - (uPriority <= - pstClassifierRule->usUserPriority[1])) - bClassificationSucceed = TRUE; - - if (!bClassificationSucceed) - return false; - } - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ETH CS 802.1 D User Priority Rule Matched\n"); - - bClassificationSucceed = false; - - if (pstClassifierRule->usValidityBitMap & - (1<eNwpktEthFrameType != eEth802QVLANFrame) - return false; - - usVLANID = ntohs(*(USHORT *)(skb->data + - sizeof(struct bcm_eth_header))) & 0xFFF; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "%s Pkt VLANID %x Priority: %d\n", - __func__, - usVLANID, - uPriority); - - if (usVLANID == ((pstClassifierRule->usVLANID & 0xFFF0) >> 4)) - bClassificationSucceed = TRUE; - - if (!bClassificationSucceed) - return false; - } - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ETH CS 802.1 Q VLAN ID Rule Matched\n"); - - return TRUE; -} - - -static bool EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, - struct sk_buff *skb, - struct bcm_eth_packet_info *pstEthCsPktInfo, - struct bcm_classifier_rule *pstClassifierRule, - B_UINT8 EthCSCupport) -{ - bool bClassificationSucceed = false; - - bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule, - ((struct bcm_eth_header *)(skb->data))->au8SourceAddress); - if (!bClassificationSucceed) - return false; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ETH CS SrcMAC Matched\n"); - - bClassificationSucceed = EthCSMatchDestMACAddress(pstClassifierRule, - ((struct bcm_eth_header *)(skb->data))->au8DestinationAddress); - if (!bClassificationSucceed) - return false; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ETH CS DestMAC Matched\n"); - - /* classify on ETHType/802.2SAP TLV */ - bClassificationSucceed = EthCSMatchEThTypeSAP(pstClassifierRule, - skb, - pstEthCsPktInfo); - if (!bClassificationSucceed) - return false; - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ETH CS EthType/802.2SAP Matched\n"); - - /* classify on 802.1VLAN Header Parameters */ - bClassificationSucceed = EthCSMatchVLANRules(pstClassifierRule, - skb, - pstEthCsPktInfo); - if (!bClassificationSucceed) - return false; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "ETH CS 802.1 VLAN Rules Matched\n"); - - return bClassificationSucceed; -} - -static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter, - PVOID pvEthPayload, - struct bcm_eth_packet_info *pstEthCsPktInfo) -{ - USHORT u16Etype = ntohs( - ((struct bcm_eth_header *)pvEthPayload)->u16Etype); - - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "EthCSGetPktInfo : Eth Hdr Type : %X\n", - u16Etype); - if (u16Etype > 0x5dc) { - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "EthCSGetPktInfo : ETH2 Frame\n"); - /* ETH2 Frame */ - if (u16Etype == ETHERNET_FRAMETYPE_802QVLAN) { - /* 802.1Q VLAN Header */ - pstEthCsPktInfo->eNwpktEthFrameType = eEth802QVLANFrame; - u16Etype = ((struct bcm_eth_q_frame *)pvEthPayload)->EthType; - /* ((ETH_CS_802_Q_FRAME*)pvEthPayload)->UserPriority */ - } else { - 
pstEthCsPktInfo->eNwpktEthFrameType = eEthOtherFrame; - u16Etype = ntohs(u16Etype); - } - } else { - /* 802.2 LLC */ - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "802.2 LLC Frame\n"); - pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCFrame; - pstEthCsPktInfo->ucDSAP = - ((struct bcm_eth_llc_frame *)pvEthPayload)->DSAP; - if (pstEthCsPktInfo->ucDSAP == 0xAA && ((struct bcm_eth_llc_frame *)pvEthPayload)->SSAP == 0xAA) { - /* SNAP Frame */ - pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCSNAPFrame; - u16Etype = ((struct bcm_eth_llc_snap_frame *)pvEthPayload)->usEtherType; - } - } - if (u16Etype == ETHERNET_FRAMETYPE_IPV4) - pstEthCsPktInfo->eNwpktIPFrameType = eIPv4Packet; - else if (u16Etype == ETHERNET_FRAMETYPE_IPV6) - pstEthCsPktInfo->eNwpktIPFrameType = eIPv6Packet; - else - pstEthCsPktInfo->eNwpktIPFrameType = eNonIPPacket; - - pstEthCsPktInfo->usEtherType = ((struct bcm_eth_header *)pvEthPayload)->u16Etype; - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "EthCsPktInfo->eNwpktIPFrameType : %x\n", - pstEthCsPktInfo->eNwpktIPFrameType); - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "EthCsPktInfo->eNwpktEthFrameType : %x\n", - pstEthCsPktInfo->eNwpktEthFrameType); - BCM_DEBUG_PRINT(Adapter, - DBG_TYPE_TX, - IPV4_DBG, - DBG_LVL_ALL, - "EthCsPktInfo->usEtherType : %x\n", - pstEthCsPktInfo->usEtherType); -} - diff --git a/drivers/staging/bcm/Queue.h b/drivers/staging/bcm/Queue.h deleted file mode 100644 index 460c0aee67f6..000000000000 --- a/drivers/staging/bcm/Queue.h +++ /dev/null @@ -1,29 +0,0 @@ -/************************************* -* Queue.h -**************************************/ -#ifndef __QUEUE_H__ -#define __QUEUE_H__ - - - -#define ENQUEUEPACKET(_Head, _Tail, _Packet) \ -do { \ - if (!_Head) { \ - _Head = _Packet; \ - } \ - else { \ - (_Tail)->next = _Packet; \ - } \ - (_Packet)->next = NULL; \ - _Tail = _Packet; \ -} while (0) -#define DEQUEUEPACKET(Head, Tail) \ -do { \ - if (Head) { \ - if (!Head->next) { \ - Tail = NULL; \ - } \ - Head = Head->next; \ - } \ -} while (0) -#endif /* __QUEUE_H__ */ diff --git a/drivers/staging/bcm/TODO b/drivers/staging/bcm/TODO deleted file mode 100644 index 8467f45d08a6..000000000000 --- a/drivers/staging/bcm/TODO +++ /dev/null @@ -1,26 +0,0 @@ -This driver is barely functional in its current state. - -Kevin McKinney(klmckinney1@gmail.com) and Matthias Beyer(mail@beyermatthias.de) -are currently maintaining/cleaning up this driver. Please copy us on all -patches. More maintainers are aways welcomed. - -BIG: - - existing API is (/dev/tarang) should be replaced - Is it possible to use same API as Intel Wimax stack and - have same user level components. - - Qos and queue model is non-standard and inflexible. - Use existing TC Qos? - -TODO: - - support more than one board - eliminate global variables - - remove developer debug BCM_DEBUG() macros - add a limited number of messages through netif_msg() - - fix non-standard kernel style - - checkpatch warnings - - use request firmware - - fix use of file I/O to load config with better API - - merge some files together? 
- - cleanup/eliminate debug messages - - - diff --git a/drivers/staging/bcm/Transmit.c b/drivers/staging/bcm/Transmit.c deleted file mode 100644 index 622a482e9826..000000000000 --- a/drivers/staging/bcm/Transmit.c +++ /dev/null @@ -1,271 +0,0 @@ -/** - * @file Transmit.c - * @defgroup tx_functions Transmission - * @section Queueing - * @dot - * digraph transmit1 { - * node[shape=box] - * edge[weight=5;color=red] - * - * bcm_transmit->GetPacketQueueIndex[label="IP Packet"] - * GetPacketQueueIndex->IpVersion4[label="IPV4"] - * GetPacketQueueIndex->IpVersion6[label="IPV6"] - * } - * - * @enddot - * - * @section De-Queueing - * @dot - * digraph transmit2 { - * node[shape=box] - * edge[weight=5;color=red] - * interrupt_service_thread->transmit_packets - * tx_pkt_hdler->transmit_packets - * transmit_packets->CheckAndSendPacketFromIndex - * transmit_packets->UpdateTokenCount - * CheckAndSendPacketFromIndex->PruneQueue - * CheckAndSendPacketFromIndex->IsPacketAllowedForFlow - * CheckAndSendPacketFromIndex->SendControlPacket[label="control pkt"] - * SendControlPacket->bcm_cmd53 - * CheckAndSendPacketFromIndex->SendPacketFromQueue[label="data pkt"] - * SendPacketFromQueue->SetupNextSend->bcm_cmd53 - * } - * @enddot - */ - -#include "headers.h" - -/** - * @ingroup ctrl_pkt_functions - * This function dispatches control packet to the h/w interface - * @return zero(success) or -ve value(failure) - */ -int SendControlPacket(struct bcm_mini_adapter *Adapter, char *pControlPacket) -{ - struct bcm_leader *PLeader = (struct bcm_leader *)pControlPacket; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Tx"); - if (!pControlPacket || !Adapter) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, - "Got NULL Control Packet or Adapter"); - return STATUS_FAILURE; - } - if ((atomic_read(&Adapter->CurrNumFreeTxDesc) < - ((PLeader->PLength-1)/MAX_DEVICE_DESC_SIZE)+1)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, - "NO FREE DESCRIPTORS TO SEND CONTROL PACKET"); - return STATUS_FAILURE; - } - - /* Update the netdevice statistics */ - /* Dump Packet */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, - "Leader Status: %x", PLeader->Status); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, - "Leader VCID: %x", PLeader->Vcid); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, - "Leader Length: %x", PLeader->PLength); - if (Adapter->device_removed) - return 0; - - if (netif_msg_pktdata(Adapter)) - print_hex_dump(KERN_DEBUG, PFX "tx control: ", DUMP_PREFIX_NONE, - 16, 1, pControlPacket, - PLeader->PLength + LEADER_SIZE, 0); - - Adapter->interface_transmit(Adapter->pvInterfaceAdapter, - pControlPacket, - (PLeader->PLength + LEADER_SIZE)); - - atomic_dec(&Adapter->CurrNumFreeTxDesc); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, - "<========="); - return STATUS_SUCCESS; -} - -/** - * @ingroup tx_functions - * This function despatches the IP packets with the given vcid - * to the target via the host h/w interface. 
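The free-descriptor test in SendControlPacket is a plain ceiling division: a message of PLength bytes needs ceil(PLength / MAX_DEVICE_DESC_SIZE) descriptors, computed without floating point. An illustrative helper, not part of the driver:

    /* Number of fixed-size descriptors needed to carry len bytes (len > 0). */
    static unsigned int descriptors_needed(unsigned int len, unsigned int desc_size)
    {
        return (len - 1) / desc_size + 1;
    }

    /* e.g. with desc_size = 2048: 1..2048 bytes -> 1 descriptor, 2049 -> 2 */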
- * @return zero(success) or -ve value(failure) - */ -int SetupNextSend(struct bcm_mini_adapter *Adapter, - struct sk_buff *Packet, USHORT Vcid) -{ - int status = 0; - bool bHeaderSupressionEnabled = false; - B_UINT16 uiClassifierRuleID; - u16 QueueIndex = skb_get_queue_mapping(Packet); - struct bcm_packet_info *curr_packet_info = - &Adapter->PackInfo[QueueIndex]; - struct bcm_leader Leader = {0}; - - if (Packet->len > MAX_DEVICE_DESC_SIZE) { - status = STATUS_FAILURE; - goto errExit; - } - - /* Get the Classifier Rule ID */ - uiClassifierRuleID = *((UINT32 *) (Packet->cb) + - SKB_CB_CLASSIFICATION_OFFSET); - - bHeaderSupressionEnabled = curr_packet_info->bHeaderSuppressionEnabled & - Adapter->bPHSEnabled; - - if (Adapter->device_removed) { - status = STATUS_FAILURE; - goto errExit; - } - - status = PHSTransmit(Adapter, &Packet, Vcid, uiClassifierRuleID, - bHeaderSupressionEnabled, - (UINT *)&Packet->len, - curr_packet_info->bEthCSSupport); - - if (status != STATUS_SUCCESS) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, - "PHS Transmit failed..\n"); - goto errExit; - } - - Leader.Vcid = Vcid; - - if (TCP_ACK == *((UINT32 *) (Packet->cb) + SKB_CB_TCPACK_OFFSET)) - Leader.Status = LEADER_STATUS_TCP_ACK; - else - Leader.Status = LEADER_STATUS; - - if (curr_packet_info->bEthCSSupport) { - Leader.PLength = Packet->len; - if (skb_headroom(Packet) < LEADER_SIZE) { - status = skb_cow(Packet, LEADER_SIZE); - if (status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, - DBG_LVL_ALL, - "bcm_transmit : Failed To Increase headRoom\n"); - goto errExit; - } - } - skb_push(Packet, LEADER_SIZE); - memcpy(Packet->data, &Leader, LEADER_SIZE); - } else { - Leader.PLength = Packet->len - ETH_HLEN; - memcpy((struct bcm_leader *)skb_pull(Packet, - (ETH_HLEN - LEADER_SIZE)), - &Leader, - LEADER_SIZE); - } - - status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter, - Packet->data, - (Leader.PLength + LEADER_SIZE)); - if (status) { - ++Adapter->dev->stats.tx_errors; - if (netif_msg_tx_err(Adapter)) - pr_info(PFX "%s: transmit error %d\n", - Adapter->dev->name, - status); - } else { - struct net_device_stats *netstats = &Adapter->dev->stats; - - curr_packet_info->uiTotalTxBytes += Leader.PLength; - - netstats->tx_bytes += Leader.PLength; - ++netstats->tx_packets; - - curr_packet_info->uiCurrentTokenCount -= Leader.PLength << 3; - curr_packet_info->uiSentBytes += (Packet->len); - curr_packet_info->uiSentPackets++; - curr_packet_info->NumOfPacketsSent++; - - atomic_dec(&curr_packet_info->uiPerSFTxResourceCount); - curr_packet_info->uiThisPeriodSentBytes += Leader.PLength; - } - - atomic_dec(&Adapter->CurrNumFreeTxDesc); - -errExit: - dev_kfree_skb(Packet); - return status; -} - -static int tx_pending(struct bcm_mini_adapter *Adapter) -{ - return (atomic_read(&Adapter->TxPktAvail) - && MINIMUM_PENDING_DESCRIPTORS < - atomic_read(&Adapter->CurrNumFreeTxDesc)) - || Adapter->device_removed || (1 == Adapter->downloadDDR); -} - -/** - * @ingroup tx_functions - * Transmit thread - */ -int tx_pkt_handler(struct bcm_mini_adapter *Adapter) -{ - int status = 0; - - while (!kthread_should_stop()) { - /* FIXME - the timeout looks like workaround - * for racey usage of TxPktAvail - */ - if (Adapter->LinkUpStatus) - wait_event_timeout(Adapter->tx_packet_wait_queue, - tx_pending(Adapter), - msecs_to_jiffies(10)); - else - wait_event_interruptible(Adapter->tx_packet_wait_queue, - tx_pending(Adapter)); - - if (Adapter->device_removed) - break; - - if (Adapter->downloadDDR == 1) { - Adapter->downloadDDR 
+= 1; - status = download_ddr_settings(Adapter); - if (status) - pr_err(PFX "DDR DOWNLOAD FAILED! %d\n", status); - continue; - } - - /* Check end point for halt/stall. */ - if (Adapter->bEndPointHalted == TRUE) { - Bcm_clear_halt_of_endpoints(Adapter); - Adapter->bEndPointHalted = false; - StartInterruptUrb((struct bcm_interface_adapter *) - (Adapter->pvInterfaceAdapter)); - } - - if (Adapter->LinkUpStatus && !Adapter->IdleMode) { - if (atomic_read(&Adapter->TotalPacketCount)) - update_per_sf_desc_cnts(Adapter); - } - - if (atomic_read(&Adapter->CurrNumFreeTxDesc) && - Adapter->LinkStatus == SYNC_UP_REQUEST && - !Adapter->bSyncUpRequestSent) { - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, - DBG_LVL_ALL, "Calling LinkMessage"); - LinkMessage(Adapter); - } - - if ((Adapter->IdleMode || Adapter->bShutStatus) && - atomic_read(&Adapter->TotalPacketCount)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, - TX_PACKETS, DBG_LVL_ALL, - "Device in Low Power mode...waking up"); - Adapter->usIdleModePattern = ABORT_IDLE_MODE; - Adapter->bWakeUpDevice = TRUE; - wake_up(&Adapter->process_rx_cntrlpkt); - } - - transmit_packets(Adapter); - atomic_set(&Adapter->TxPktAvail, 0); - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, - "Exiting the tx thread..\n"); - Adapter->transmit_packet_thread = NULL; - return 0; -} diff --git a/drivers/staging/bcm/Typedefs.h b/drivers/staging/bcm/Typedefs.h deleted file mode 100644 index 90b3b25dd606..000000000000 --- a/drivers/staging/bcm/Typedefs.h +++ /dev/null @@ -1,47 +0,0 @@ -/**************************** -* Typedefs.h -****************************/ -#ifndef __TYPEDEFS_H__ -#define __TYPEDEFS_H__ -#define STATUS_SUCCESS 0 -#define STATUS_FAILURE -1 - - -#define TRUE 1 - - -typedef char CHAR; -typedef int INT; -typedef short SHORT; -typedef long LONG; -typedef void VOID; - -typedef unsigned char UCHAR; -typedef unsigned char B_UINT8; -typedef unsigned short USHORT; -typedef unsigned short B_UINT16; -typedef unsigned int UINT; -typedef unsigned int B_UINT32; -typedef unsigned long ULONG; -typedef unsigned long DWORD; - -typedef char *PCHAR; -typedef short *PSHORT; -typedef int *PINT; -typedef long *PLONG; -typedef void *PVOID; - -typedef unsigned char *PUCHAR; -typedef unsigned short *PUSHORT; -typedef unsigned int *PUINT; -typedef unsigned long *PULONG; -typedef unsigned long long ULONG64; -typedef unsigned long long LARGE_INTEGER; -typedef unsigned int UINT32; -#ifndef NULL -#define NULL 0 -#endif - - -#endif /* __TYPEDEFS_H__ */ - diff --git a/drivers/staging/bcm/cntrl_SignalingInterface.h b/drivers/staging/bcm/cntrl_SignalingInterface.h deleted file mode 100644 index 8683c2d4276e..000000000000 --- a/drivers/staging/bcm/cntrl_SignalingInterface.h +++ /dev/null @@ -1,311 +0,0 @@ -#ifndef CNTRL_SIGNALING_INTERFACE_ -#define CNTRL_SIGNALING_INTERFACE_ - -#define DSA_REQ 11 -#define DSA_RSP 12 -#define DSA_ACK 13 -#define DSC_REQ 14 -#define DSC_RSP 15 -#define DSC_ACK 16 -#define DSD_REQ 17 -#define DSD_RSP 18 -#define DSD_ACK 19 -#define MAX_CLASSIFIERS_IN_SF 4 - -#define MAX_STRING_LEN 20 -#define MAX_PHS_LENGTHS 255 -#define VENDOR_PHS_PARAM_LENGTH 10 -#define MAX_NUM_ACTIVE_BS 10 -#define AUTH_TOKEN_LENGTH 10 -#define NUM_HARQ_CHANNELS 16 /* Changed from 10 to 16 to accommodate all HARQ channels */ -#define VENDOR_CLASSIFIER_PARAM_LENGTH 1 /* Changed the size to 1 byte since we dnt use it */ -#define VENDOR_SPECIF_QOS_PARAM 1 -#define VENDOR_PHS_PARAM_LENGTH 10 -#define MBS_CONTENTS_ID_LENGTH 10 -#define GLOBAL_SF_CLASSNAME_LENGTH 6 - -#define 
TYPE_OF_SERVICE_LENGTH 3 -#define IP_MASKED_SRC_ADDRESS_LENGTH 32 -#define IP_MASKED_DEST_ADDRESS_LENGTH 32 -#define PROTOCOL_SRC_PORT_RANGE_LENGTH 4 -#define PROTOCOL_DEST_PORT_RANGE_LENGTH 4 -#define ETHERNET_DEST_MAC_ADDR_LENGTH 12 -#define ETHERNET_SRC_MAC_ADDR_LENGTH 12 -#define NUM_ETHERTYPE_BYTES 3 -#define NUM_IPV6_FLOWLABLE_BYTES 3 - -struct bcm_packet_class_rules { - /* 16bit UserPriority Of The Service Flow */ - u16 u16UserPriority; - /* 16bit VLANID Of The Service Flow */ - u16 u16VLANID; - /* 16bit Packet Classification RuleIndex Of The Service Flow */ - u16 u16PacketClassificationRuleIndex; - /* 8bit Classifier Rule Priority Of The Service Flow */ - u8 u8ClassifierRulePriority; - /* Length of IP TypeOfService field */ - u8 u8IPTypeOfServiceLength; - /* 3bytes IP TypeOfService */ - u8 u8IPTypeOfService[TYPE_OF_SERVICE_LENGTH]; - /* Protocol used in classification of Service Flow */ - u8 u8Protocol; - /* Length of IP Masked Source Address */ - u8 u8IPMaskedSourceAddressLength; - /* IP Masked Source Address used in classification for the Service Flow */ - u8 u8IPMaskedSourceAddress[IP_MASKED_SRC_ADDRESS_LENGTH]; - /* Length of IP Destination Address */ - u8 u8IPDestinationAddressLength; - /* IP Destination Address used in classification for the Service Flow */ - u8 u8IPDestinationAddress[IP_MASKED_DEST_ADDRESS_LENGTH]; - /* Length of Protocol Source Port Range */ - u8 u8ProtocolSourcePortRangeLength; - /* Protocol Source Port Range used in the Service Flow */ - u8 u8ProtocolSourcePortRange[PROTOCOL_SRC_PORT_RANGE_LENGTH]; - /* Length of Protocol Dest Port Range */ - u8 u8ProtocolDestPortRangeLength; - /* Protocol Dest Port Range used in the Service Flow */ - u8 u8ProtocolDestPortRange[PROTOCOL_DEST_PORT_RANGE_LENGTH]; - /* Length of Ethernet Destination MAC Address */ - u8 u8EthernetDestMacAddressLength; - /* Ethernet Destination MAC Address used in classification of the Service Flow */ - u8 u8EthernetDestMacAddress[ETHERNET_DEST_MAC_ADDR_LENGTH]; - /* Length of Ethernet Source MAC Address */ - u8 u8EthernetSourceMACAddressLength; - /* Ethernet Source MAC Address used in classification of the Service Flow */ - u8 u8EthernetSourceMACAddress[ETHERNET_SRC_MAC_ADDR_LENGTH]; - /* Length of Ethertype */ - u8 u8EthertypeLength; - /* 3bytes Ethertype Of The Service Flow */ - u8 u8Ethertype[NUM_ETHERTYPE_BYTES]; - /* 8bit Associated PHSI Of The Service Flow */ - u8 u8AssociatedPHSI; - /* Length of Vendor Specific Classifier Param length Of The Service Flow */ - u8 u8VendorSpecificClassifierParamLength; - /* Vendor Specific Classifier Param Of The Service Flow */ - u8 u8VendorSpecificClassifierParam[VENDOR_CLASSIFIER_PARAM_LENGTH]; - /* Length Of IPv6 Flow Lable of the Service Flow */ - u8 u8IPv6FlowLableLength; - /* IPv6 Flow Lable Of The Service Flow */ - u8 u8IPv6FlowLable[NUM_IPV6_FLOWLABLE_BYTES]; - /* Action associated with the classifier rule */ - u8 u8ClassifierActionRule; - u16 u16ValidityBitMap; -}; - -struct bcm_phs_rules { - /* 8bit PHS Index Of The Service Flow */ - u8 u8PHSI; - /* PHSF Length Of The Service Flow */ - u8 u8PHSFLength; - /* String of bytes containing header information to be suppressed by the sending CS and reconstructed by the receiving CS */ - u8 u8PHSF[MAX_PHS_LENGTHS]; - /* PHSM Length Of The Service Flow */ - u8 u8PHSMLength; - /* PHS Mask for the SF */ - u8 u8PHSM[MAX_PHS_LENGTHS]; - /* 8bit Total number of bytes to be suppressed for the Service Flow */ - u8 u8PHSS; - /* 8bit Indicates whether or not Packet Header contents need to be verified prior to 
suppression */ - u8 u8PHSV; - /* Vendor Specific PHS param Length Of The Service Flow */ - u8 u8VendorSpecificPHSParamsLength; - /* Vendor Specific PHS param Of The Service Flow */ - u8 u8VendorSpecificPHSParams[VENDOR_PHS_PARAM_LENGTH]; - u8 u8Padding[2]; -}; - -struct bcm_convergence_types { - /* 8bit Phs Classfier Action Of The Service Flow */ - u8 u8ClassfierDSCAction; - /* 8bit Phs DSC Action Of The Service Flow */ - u8 u8PhsDSCAction; - /* 16bit Padding */ - u8 u8Padding[2]; - /* Packet classification rules structure */ - struct bcm_packet_class_rules cCPacketClassificationRule; - /* Payload header suppression rules structure */ - struct bcm_phs_rules cPhsRule; -}; - -struct bcm_connect_mgr_params { - /* 32bitSFID Of The Service Flow */ - u32 u32SFID; - /* 32bit Maximum Sustained Traffic Rate of the Service Flow */ - u32 u32MaxSustainedTrafficRate; - /* 32bit Maximum Traffic Burst allowed for the Service Flow */ - u32 u32MaxTrafficBurst; - /* 32bit Minimum Reserved Traffic Rate of the Service Flow */ - u32 u32MinReservedTrafficRate; - /* 32bit Tolerated Jitter of the Service Flow */ - u32 u32ToleratedJitter; - /* 32bit Maximum Latency of the Service Flow */ - u32 u32MaximumLatency; - /* 16bitCID Of The Service Flow */ - u16 u16CID; - /* 16bit SAID on which the service flow being set up shall be mapped */ - u16 u16TargetSAID; - /* 16bit ARQ window size negotiated */ - u16 u16ARQWindowSize; - /* 16bit Total Tx delay incl sending, receiving & processing delays */ - u16 u16ARQRetryTxTimeOut; - /* 16bit Total Rx delay incl sending, receiving & processing delays */ - u16 u16ARQRetryRxTimeOut; - /* 16bit ARQ block lifetime */ - u16 u16ARQBlockLifeTime; - /* 16bit ARQ Sync loss timeout */ - u16 u16ARQSyncLossTimeOut; - /* 16bit ARQ Purge timeout */ - u16 u16ARQRxPurgeTimeOut; - /* TODO::Remove this once we move to a new CORR2 driver - * brief Size of an ARQ block - */ - u16 u16ARQBlockSize; - /* #endif */ - /* 16bit Nominal interval b/w consecutive SDU arrivals at MAC SAP */ - u16 u16SDUInterArrivalTime; - /* 16bit Specifies the time base for rate measurement */ - u16 u16TimeBase; - /* 16bit Interval b/w Successive Grant oppurtunities */ - u16 u16UnsolicitedGrantInterval; - /* 16bit Interval b/w Successive Polling grant oppurtunities */ - u16 u16UnsolicitedPollingInterval; - /* internal var to get the overhead */ - u16 u16MacOverhead; - /* MBS contents Identifier */ - u16 u16MBSContentsID[MBS_CONTENTS_ID_LENGTH]; - /* MBS contents Identifier length */ - u8 u8MBSContentsIDLength; - /* ServiceClassName Length Of The Service Flow */ - u8 u8ServiceClassNameLength; - /* 32bytes ServiceClassName Of The Service Flow */ - u8 u8ServiceClassName[32]; - /* 8bit Indicates whether or not MBS service is requested for this Serivce Flow */ - u8 u8MBSService; - /* 8bit QOS Parameter Set specifies proper application of QoS parameters to Provisioned, Admitted and Active sets */ - u8 u8QosParamSet; - /* 8bit Traffic Priority Of the Service Flow */ - u8 u8TrafficPriority; - /* 8bit Uplink Grant Scheduling Type of The Service Flow */ - u8 u8ServiceFlowSchedulingType; - /* 8bit Request transmission Policy of the Service Flow */ - u8 u8RequesttransmissionPolicy; - /* 8bit Specifies whether SDUs for this Service flow are of FixedLength or Variable length */ - u8 u8FixedLengthVSVariableLengthSDUIndicator; - /* 8bit Length of the SDU for a fixed length SDU service flow */ - u8 u8SDUSize; - /* 8bit Indicates whether or not ARQ is requested for this connection */ - u8 u8ARQEnable; - /* < 8bit Indicates whether or not 
data has tobe delivered in order to higher layer */ - u8 u8ARQDeliverInOrder; - /* 8bit Receiver ARQ ACK processing time */ - u8 u8RxARQAckProcessingTime; - /* 8bit Convergence Sublayer Specification Of The Service Flow */ - u8 u8CSSpecification; - /* 8 bit Type of data delivery service */ - u8 u8TypeOfDataDeliveryService; - /* 8bit Specifies whether a service flow may generate Paging */ - u8 u8PagingPreference; - /* 8bit Indicates the MBS Zone through which the connection or virtual connection is valid */ - u8 u8MBSZoneIdentifierassignment; - /* 8bit Specifies whether traffic on SF should generate MOB_TRF_IND to MS in sleep mode */ - u8 u8TrafficIndicationPreference; - /* 8bit Speciifes the length of predefined Global QoS parameter set encoding for this SF */ - u8 u8GlobalServicesClassNameLength; - /* 6 byte Speciifes the predefined Global QoS parameter set encoding for this SF */ - u8 u8GlobalServicesClassName[GLOBAL_SF_CLASSNAME_LENGTH]; - /* 8bit Indicates whether or not SN feedback is enabled for the conn */ - u8 u8SNFeedbackEnabled; - /* Indicates the size of the Fragment Sequence Number for the connection */ - u8 u8FSNSize; - /* 8bit Number of CIDs in active BS list */ - u8 u8CIDAllocation4activeBSsLength; - /* CIDs of BS in the active list */ - u8 u8CIDAllocation4activeBSs[MAX_NUM_ACTIVE_BS]; - /* Specifies if PDU extended subheader should be applied on every PDU on this conn */ - u8 u8PDUSNExtendedSubheader4HarqReordering; - /* 8bit Specifies whether the connection uses HARQ or not */ - u8 u8HARQServiceFlows; - /* Specifies the length of Authorization token */ - u8 u8AuthTokenLength; - /* Specifies the Authorization token */ - u8 u8AuthToken[AUTH_TOKEN_LENGTH]; - /* specifes Number of HARQ channels used to carry data length */ - u8 u8HarqChannelMappingLength; - /* specifes HARQ channels used to carry data */ - u8 u8HARQChannelMapping[NUM_HARQ_CHANNELS]; - /* 8bit Length of Vendor Specific QoS Params */ - u8 u8VendorSpecificQoSParamLength; - /* 1byte Vendor Specific QoS Param Of The Service Flow */ - u8 u8VendorSpecificQoSParam[VENDOR_SPECIF_QOS_PARAM]; - /* indicates total classifiers in the SF */ - u8 u8TotalClassifiers; /* < Total number of valid classifiers */ - u8 bValid; /* < Validity flag */ - u8 u8Padding; /* < Padding byte */ - /* - * Structure for Convergence SubLayer Types with a maximum of 4 classifiers - */ - struct bcm_convergence_types cConvergenceSLTypes[MAX_CLASSIFIERS_IN_SF]; -}; - -struct bcm_add_request { - u8 u8Type; /* < Type */ - u8 eConnectionDir; /* < Connection direction */ - /* brief 16 bit TID */ - u16 u16TID; /* < 16bit TID */ - /* brief 16bitCID */ - u16 u16CID; /* < 16bit CID */ - /* brief 16bitVCID */ - u16 u16VCID; /* < 16bit VCID */ - struct bcm_connect_mgr_params *psfParameterSet; /* < connection manager parameters */ -}; - -struct bcm_add_indication { - u8 u8Type; /* < Type */ - u8 eConnectionDir; /* < Connection Direction */ - /* brief 16 bit TID */ - u16 u16TID; /* < TID */ - /* brief 16bitCID */ - u16 u16CID; /* < 16bitCID */ - /* brief 16bitVCID */ - u16 u16VCID; /* < 16bitVCID */ - struct bcm_connect_mgr_params *psfAuthorizedSet; /* Authorized set of connection manager parameters */ - struct bcm_connect_mgr_params *psfAdmittedSet; /* Admitted set of connection manager parameters */ - struct bcm_connect_mgr_params *psfActiveSet; /* Activeset of connection manager parameters */ - u8 u8CC; /* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "Typedefs.h" -#include "Macros.h" -#include "HostMIBSInterface.h" -#include "cntrl_SignalingInterface.h" -#include "PHSDefines.h" -#include "led_control.h" -#include "Ioctl.h" -#include "nvm.h" -#include "target_params.h" -#include "Adapter.h" -#include "CmHost.h" -#include "DDRInit.h" -#include "Debug.h" -#include "IPv6ProtocolHdr.h" -#include "PHSModule.h" -#include "Protocol.h" -#include "Prototypes.h" -#include "Queue.h" -#include "vendorspecificextn.h" - -#include "InterfaceMacros.h" -#include "InterfaceAdapter.h" -#include "InterfaceIsr.h" -#include "InterfaceMisc.h" -#include "InterfaceRx.h" -#include "InterfaceTx.h" -#include "InterfaceIdleMode.h" -#include "InterfaceInit.h" - -#define DRV_NAME "beceem" -#define DEV_NAME "tarang" -#define DRV_DESCRIPTION "Beceem Communications Inc. WiMAX driver" -#define DRV_COPYRIGHT "Copyright 2010. Beceem Communications Inc" -#define DRV_VERSION "5.2.45" -#define PFX DRV_NAME " " - -extern struct class *bcm_class; - -#endif diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c deleted file mode 100644 index f9b08a5d8ce8..000000000000 --- a/drivers/staging/bcm/hostmibs.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * File Name: hostmibs.c - * - * Author: Beceem Communications Pvt. Ltd - * - * Abstract: This file contains the routines to copy the statistics used by - * the driver to the Host MIBS structure and giving the same to Application. - */ - -#include "headers.h" - -INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, - struct bcm_host_stats_mibs *pstHostMibs) -{ - struct bcm_phs_entry *pstServiceFlowEntry = NULL; - struct bcm_phs_rule *pstPhsRule = NULL; - struct bcm_phs_classifier_table *pstClassifierTable = NULL; - struct bcm_phs_classifier_entry *pstClassifierRule = NULL; - struct bcm_phs_extension *pDeviceExtension = &Adapter->stBCMPhsContext; - struct bcm_mibs_host_info *host_info; - UINT nClassifierIndex = 0; - UINT nPhsTableIndex = 0; - UINT nSfIndex = 0; - UINT uiIndex = 0; - - if (pDeviceExtension == NULL) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS, - DBG_LVL_ALL, "Invalid Device Extension\n"); - return STATUS_FAILURE; - } - - /* Copy the classifier Table */ - for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; - nClassifierIndex++) { - if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE) - memcpy(&pstHostMibs->astClassifierTable[nClassifierIndex], - &Adapter->astClassifierTable[nClassifierIndex], - sizeof(struct bcm_mibs_classifier_rule)); - } - - /* Copy the SF Table */ - for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) { - if (Adapter->PackInfo[nSfIndex].bValid) { - memcpy(&pstHostMibs->astSFtable[nSfIndex], - &Adapter->PackInfo[nSfIndex], - sizeof(struct bcm_mibs_table)); - } else { - /* If index in not valid, - * don't process this for the PHS table. - * Go For the next entry. - */ - continue; - } - - /* Retrieve the SFID Entry Index for requested Service Flow */ - if (PHS_INVALID_TABLE_INDEX == - GetServiceFlowEntry(pDeviceExtension-> - pstServiceFlowPhsRulesTable, - Adapter->PackInfo[nSfIndex]. 
- usVCID_Value, &pstServiceFlowEntry)) - - continue; - - pstClassifierTable = pstServiceFlowEntry->pstClassifierTable; - - for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) { - pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex]; - - if (pstClassifierRule->bUsed) { - pstPhsRule = pstClassifierRule->pstPhsRule; - - pstHostMibs->astPhsRulesTable[nPhsTableIndex]. - ulSFID = Adapter->PackInfo[nSfIndex].ulSFID; - - memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI, - &pstPhsRule->u8PHSI, - sizeof(struct bcm_phs_rule)); - nPhsTableIndex++; - - } - - } - - } - - /* Copy other Host Statistics parameters */ - host_info = &pstHostMibs->stHostInfo; - host_info->GoodTransmits = Adapter->dev->stats.tx_packets; - host_info->GoodReceives = Adapter->dev->stats.rx_packets; - host_info->CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc); - host_info->BEBucketSize = Adapter->BEBucketSize; - host_info->rtPSBucketSize = Adapter->rtPSBucketSize; - host_info->TimerActive = Adapter->TimerActive; - host_info->u32TotalDSD = Adapter->u32TotalDSD; - - memcpy(host_info->aTxPktSizeHist, Adapter->aTxPktSizeHist, - sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES); - memcpy(host_info->aRxPktSizeHist, Adapter->aRxPktSizeHist, - sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES); - - return STATUS_SUCCESS; -} - -VOID GetDroppedAppCntrlPktMibs(struct bcm_host_stats_mibs *pstHostMibs, - struct bcm_tarang_data *pTarang) -{ - memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs), - &(pTarang->stDroppedAppCntrlMsgs), - sizeof(struct bcm_mibs_dropped_cntrl_msg)); -} - -VOID CopyMIBSExtendedSFParameters(struct bcm_mini_adapter *Adapter, - struct bcm_connect_mgr_params *psfLocalSet, - UINT uiSearchRuleIndex) -{ - struct bcm_mibs_parameters *t = - &Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable; - - t->wmanIfSfid = psfLocalSet->u32SFID; - t->wmanIfCmnCpsMaxSustainedRate = - psfLocalSet->u32MaxSustainedTrafficRate; - t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst; - t->wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate; - t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter; - t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency; - t->wmanIfCmnCpsFixedVsVariableSduInd = - psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator; - t->wmanIfCmnCpsFixedVsVariableSduInd = - ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd); - t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize; - t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize); - t->wmanIfCmnCpsSfSchedulingType = - psfLocalSet->u8ServiceFlowSchedulingType; - t->wmanIfCmnCpsSfSchedulingType = - ntohl(t->wmanIfCmnCpsSfSchedulingType); - t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable; - t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable); - t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize); - t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize); - t->wmanIfCmnCpsArqBlockLifetime = - ntohs(psfLocalSet->u16ARQBlockLifeTime); - t->wmanIfCmnCpsArqBlockLifetime = - ntohl(t->wmanIfCmnCpsArqBlockLifetime); - t->wmanIfCmnCpsArqSyncLossTimeout = - ntohs(psfLocalSet->u16ARQSyncLossTimeOut); - t->wmanIfCmnCpsArqSyncLossTimeout = - ntohl(t->wmanIfCmnCpsArqSyncLossTimeout); - t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder; - t->wmanIfCmnCpsArqDeliverInOrder = - ntohl(t->wmanIfCmnCpsArqDeliverInOrder); - t->wmanIfCmnCpsArqRxPurgeTimeout = - ntohs(psfLocalSet->u16ARQRxPurgeTimeOut); - t->wmanIfCmnCpsArqRxPurgeTimeout = - 
ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout); - t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize); - t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize); - t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy; - t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy); - t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification; - t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification); - t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID); - t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid); - -} diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c deleted file mode 100644 index 181f17ff0a24..000000000000 --- a/drivers/staging/bcm/led_control.c +++ /dev/null @@ -1,951 +0,0 @@ -#include "headers.h" - -#define STATUS_IMAGE_CHECKSUM_MISMATCH -199 -#define EVENT_SIGNALED 1 - -static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size) -{ - B_UINT16 u16CheckSum = 0; - - while (u32Size--) { - u16CheckSum += (B_UINT8)~(*pu8Buffer); - pu8Buffer++; - } - return u16CheckSum; -} - -bool IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios) -{ - INT Status; - - Status = (Adapter->gpioBitMap & gpios) ^ gpios; - if (Status) - return false; - else - return TRUE; -} - -static INT LED_Blink(struct bcm_mini_adapter *Adapter, - UINT GPIO_Num, - UCHAR uiLedIndex, - ULONG timeout, - INT num_of_time, - enum bcm_led_events currdriverstate) -{ - int Status = STATUS_SUCCESS; - bool bInfinite = false; - - /* Check if num_of_time is -ve. If yes, blink led in infinite loop */ - if (num_of_time < 0) { - bInfinite = TRUE; - num_of_time = 1; - } - while (num_of_time) { - if (currdriverstate == Adapter->DriverState) - TURN_ON_LED(Adapter, GPIO_Num, uiLedIndex); - - /* Wait for timeout after setting on the LED */ - Status = wait_event_interruptible_timeout( - Adapter->LEDInfo.notify_led_event, - currdriverstate != Adapter->DriverState || - kthread_should_stop(), - msecs_to_jiffies(timeout)); - - if (kthread_should_stop()) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, - "Led thread got signal to exit..hence exiting"); - Adapter->LEDInfo.led_thread_running = - BCM_LED_THREAD_DISABLED; - TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex); - Status = EVENT_SIGNALED; - break; - } - if (Status) { - TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex); - Status = EVENT_SIGNALED; - break; - } - - TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex); - Status = wait_event_interruptible_timeout( - Adapter->LEDInfo.notify_led_event, - currdriverstate != Adapter->DriverState || - kthread_should_stop(), - msecs_to_jiffies(timeout)); - if (bInfinite == false) - num_of_time--; - } - return Status; -} - -static INT ScaleRateofTransfer(ULONG rate) -{ - if (rate <= 3) - return rate; - else if ((rate > 3) && (rate <= 100)) - return 5; - else if ((rate > 100) && (rate <= 200)) - return 6; - else if ((rate > 200) && (rate <= 300)) - return 7; - else if ((rate > 300) && (rate <= 400)) - return 8; - else if ((rate > 400) && (rate <= 500)) - return 9; - else if ((rate > 500) && (rate <= 600)) - return 10; - return MAX_NUM_OF_BLINKS; -} - -static INT blink_in_normal_bandwidth(struct bcm_mini_adapter *ad, - INT *time, - INT *time_tx, - INT *time_rx, - UCHAR GPIO_Num_tx, - UCHAR uiTxLedIndex, - UCHAR GPIO_Num_rx, - UCHAR uiRxLedIndex, - enum bcm_led_events currdriverstate, - ulong *timeout) -{ - /* - * Assign minimum number of blinks of - * either Tx or Rx. - */ - *time = (*time_tx > *time_rx ? 
*time_rx : *time_tx); - - if (*time > 0) { - /* Blink both Tx and Rx LEDs */ - if ((LED_Blink(ad, 1 << GPIO_Num_tx, uiTxLedIndex, *timeout, - *time, currdriverstate) == EVENT_SIGNALED) || - (LED_Blink(ad, 1 << GPIO_Num_rx, uiRxLedIndex, *timeout, - *time, currdriverstate) == EVENT_SIGNALED)) - return EVENT_SIGNALED; - } - - if (*time == *time_tx) { - /* Blink pending rate of Rx */ - if (LED_Blink(ad, (1 << GPIO_Num_rx), uiRxLedIndex, *timeout, - *time_rx - *time, - currdriverstate) == EVENT_SIGNALED) - return EVENT_SIGNALED; - - *time = *time_rx; - } else { - /* Blink pending rate of Tx */ - if (LED_Blink(ad, 1 << GPIO_Num_tx, uiTxLedIndex, *timeout, - *time_tx - *time, - currdriverstate) == EVENT_SIGNALED) - return EVENT_SIGNALED; - - *time = *time_tx; - } - - return 0; -} - -static INT LED_Proportional_Blink(struct bcm_mini_adapter *Adapter, - UCHAR GPIO_Num_tx, - UCHAR uiTxLedIndex, - UCHAR GPIO_Num_rx, - UCHAR uiRxLedIndex, - enum bcm_led_events currdriverstate) -{ - /* Initial values of TX and RX packets */ - ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0; - /* values of TX and RX packets after 1 sec */ - ULONG64 Final_num_of_packts_tx = 0, Final_num_of_packts_rx = 0; - /* Rate of transfer of Tx and Rx in 1 sec */ - ULONG64 rate_of_transfer_tx = 0, rate_of_transfer_rx = 0; - int Status = STATUS_SUCCESS; - INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0; - UINT remDelay = 0; - /* UINT GPIO_num = DISABLE_GPIO_NUM; */ - ulong timeout = 0; - - /* Read initial value of packets sent/received */ - Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets; - Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets; - - /* Scale the rate of transfer to no of blinks. */ - num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx); - num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx); - - while ((Adapter->device_removed == false)) { - timeout = 50; - - if (EVENT_SIGNALED == blink_in_normal_bandwidth(Adapter, - &num_of_time, - &num_of_time_tx, - &num_of_time_rx, - GPIO_Num_tx, - uiTxLedIndex, - GPIO_Num_rx, - uiRxLedIndex, - currdriverstate, - &timeout)) - return EVENT_SIGNALED; - - - /* - * If Tx/Rx rate is less than maximum blinks per second, - * wait till delay completes to 1 second - */ - remDelay = MAX_NUM_OF_BLINKS - num_of_time; - if (remDelay > 0) { - timeout = 100 * remDelay; - Status = wait_event_interruptible_timeout( - Adapter->LEDInfo.notify_led_event, - currdriverstate != Adapter->DriverState - || kthread_should_stop(), - msecs_to_jiffies(timeout)); - - if (kthread_should_stop()) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, - LED_DUMP_INFO, DBG_LVL_ALL, - "Led thread got signal to exit..hence exiting"); - Adapter->LEDInfo.led_thread_running = - BCM_LED_THREAD_DISABLED; - return EVENT_SIGNALED; - } - if (Status) - return EVENT_SIGNALED; - } - - /* Turn off both Tx and Rx LEDs before next second */ - TURN_OFF_LED(Adapter, 1 << GPIO_Num_tx, uiTxLedIndex); - TURN_OFF_LED(Adapter, 1 << GPIO_Num_rx, uiTxLedIndex); - - /* - * Read the Tx & Rx packets transmission after 1 second and - * calculate rate of transfer - */ - Final_num_of_packts_tx = Adapter->dev->stats.tx_packets; - Final_num_of_packts_rx = Adapter->dev->stats.rx_packets; - - rate_of_transfer_tx = Final_num_of_packts_tx - - Initial_num_of_packts_tx; - rate_of_transfer_rx = Final_num_of_packts_rx - - Initial_num_of_packts_rx; - - /* Read initial value of packets sent/received */ - Initial_num_of_packts_tx = Final_num_of_packts_tx; - Initial_num_of_packts_rx = 
Final_num_of_packts_rx; - - /* Scale the rate of transfer to no of blinks. */ - num_of_time_tx = - ScaleRateofTransfer((ULONG)rate_of_transfer_tx); - num_of_time_rx = - ScaleRateofTransfer((ULONG)rate_of_transfer_rx); - - } - return Status; -} - -/* - * ----------------------------------------------------------------------------- - * Procedure: ValidateDSDParamsChecksum - * - * Description: Reads DSD Params and validates checkusm. - * - * Arguments: - * Adapter - Pointer to Adapter structure. - * ulParamOffset - Start offset of the DSD parameter to be read and - * validated. - * usParamLen - Length of the DSD Parameter. - * - * Returns: - * - * ----------------------------------------------------------------------------- - */ -static INT ValidateDSDParamsChecksum(struct bcm_mini_adapter *Adapter, - ULONG ulParamOffset, - USHORT usParamLen) -{ - INT Status = STATUS_SUCCESS; - PUCHAR puBuffer = NULL; - USHORT usChksmOrg = 0; - USHORT usChecksumCalculated = 0; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, - "LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X", - ulParamOffset, usParamLen); - - puBuffer = kmalloc(usParamLen, GFP_KERNEL); - if (!puBuffer) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, - "LED Thread: ValidateDSDParamsChecksum Allocation failed"); - return -ENOMEM; - - } - - /* Read the DSD data from the parameter offset. */ - if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)puBuffer, - ulParamOffset, usParamLen)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, - "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed"); - Status = STATUS_IMAGE_CHECKSUM_MISMATCH; - goto exit; - } - - /* Calculate the checksum of the data read from the DSD parameter. */ - usChecksumCalculated = CFG_CalculateChecksum(puBuffer, usParamLen); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, - "LED Thread: usCheckSumCalculated = 0x%x\n", - usChecksumCalculated); - - /* - * End of the DSD parameter will have a TWO bytes checksum stored in it. - * Read it and compare with the calculated Checksum. - */ - if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)&usChksmOrg, - ulParamOffset+usParamLen, 2)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, - "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed"); - Status = STATUS_IMAGE_CHECKSUM_MISMATCH; - goto exit; - } - usChksmOrg = ntohs(usChksmOrg); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, - "LED Thread: usChksmOrg = 0x%x", usChksmOrg); - - /* - * Compare the checksum calculated with the checksum read - * from DSD section - */ - if (usChecksumCalculated ^ usChksmOrg) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, - "LED Thread: ValidateDSDParamsChecksum: Checksums don't match"); - Status = STATUS_IMAGE_CHECKSUM_MISMATCH; - goto exit; - } - -exit: - kfree(puBuffer); - return Status; -} - - -/* - * ----------------------------------------------------------------------------- - * Procedure: ValidateHWParmStructure - * - * Description: Validates HW Parameters. - * - * Arguments: - * Adapter - Pointer to Adapter structure. - * ulHwParamOffset - Start offset of the HW parameter Section to be read - * and validated. 
- * - * Returns: - * - * ----------------------------------------------------------------------------- - */ -static INT ValidateHWParmStructure(struct bcm_mini_adapter *Adapter, - ULONG ulHwParamOffset) -{ - - INT Status = STATUS_SUCCESS; - USHORT HwParamLen = 0; - /* - * Add DSD start offset to the hwParamOffset to get - * the actual address. - */ - ulHwParamOffset += DSD_START_OFFSET; - - /* Read the Length of HW_PARAM structure */ - BeceemNVMRead(Adapter, (PUINT)&HwParamLen, ulHwParamOffset, 2); - HwParamLen = ntohs(HwParamLen); - if (0 == HwParamLen || HwParamLen > Adapter->uiNVMDSDSize) - return STATUS_IMAGE_CHECKSUM_MISMATCH; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, - "LED Thread:HwParamLen = 0x%x", HwParamLen); - Status = ValidateDSDParamsChecksum(Adapter, ulHwParamOffset, - HwParamLen); - return Status; -} /* ValidateHWParmStructure() */ - -static int ReadLEDInformationFromEEPROM(struct bcm_mini_adapter *Adapter, - UCHAR GPIO_Array[]) -{ - int Status = STATUS_SUCCESS; - - ULONG dwReadValue = 0; - USHORT usHwParamData = 0; - USHORT usEEPROMVersion = 0; - UCHAR ucIndex = 0; - UCHAR ucGPIOInfo[32] = {0}; - - BeceemNVMRead(Adapter, (PUINT)&usEEPROMVersion, - EEPROM_VERSION_OFFSET, 2); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, - "usEEPROMVersion: Minor:0x%X Major:0x%x", - usEEPROMVersion & 0xFF, - ((usEEPROMVersion >> 8) & 0xFF)); - - - if (((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION) { - BeceemNVMRead(Adapter, (PUINT)&usHwParamData, - EEPROM_HW_PARAM_POINTER_ADDRESS, 2); - usHwParamData = ntohs(usHwParamData); - dwReadValue = usHwParamData; - } else { - /* - * Validate Compatibility section and then read HW param - * if compatibility section is valid. - */ - Status = ValidateDSDParamsChecksum(Adapter, - DSD_START_OFFSET, - COMPATIBILITY_SECTION_LENGTH_MAP5); - - if (Status != STATUS_SUCCESS) - return Status; - - BeceemNVMRead(Adapter, (PUINT)&dwReadValue, - EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5, 4); - dwReadValue = ntohl(dwReadValue); - } - - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, - "LED Thread: Start address of HW_PARAM structure = 0x%lx", - dwReadValue); - - /* - * Validate if the address read out is within the DSD. - * Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit. - * lower limit should be above DSD_START_OFFSET and - * upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET) - */ - if (dwReadValue < DSD_START_OFFSET || - dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET)) - return STATUS_IMAGE_CHECKSUM_MISMATCH; - - Status = ValidateHWParmStructure(Adapter, dwReadValue); - if (Status) - return Status; - - /* - * Add DSD_START_OFFSET to the offset read from the EEPROM. - * This will give the actual start HW Parameters start address. - * To read GPIO section, add GPIO offset further. - */ - - dwReadValue += DSD_START_OFFSET; - /* = start address of hw param section. */ - dwReadValue += GPIO_SECTION_START_OFFSET; - /* = GPIO start offset within HW Param section. 
*/ - - /* - * Read the GPIO values for 32 GPIOs from EEPROM and map the function - * number to GPIO pin number to GPIO_Array - */ - BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo, dwReadValue, 32); - for (ucIndex = 0; ucIndex < 32; ucIndex++) { - - switch (ucGPIOInfo[ucIndex]) { - case RED_LED: - GPIO_Array[RED_LED] = ucIndex; - Adapter->gpioBitMap |= (1 << ucIndex); - break; - case BLUE_LED: - GPIO_Array[BLUE_LED] = ucIndex; - Adapter->gpioBitMap |= (1 << ucIndex); - break; - case YELLOW_LED: - GPIO_Array[YELLOW_LED] = ucIndex; - Adapter->gpioBitMap |= (1 << ucIndex); - break; - case GREEN_LED: - GPIO_Array[GREEN_LED] = ucIndex; - Adapter->gpioBitMap |= (1 << ucIndex); - break; - default: - break; - } - - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, - "GPIO's bit map correspond to LED :0x%X", - Adapter->gpioBitMap); - return Status; -} - - -static int ReadConfigFileStructure(struct bcm_mini_adapter *Adapter, - bool *bEnableThread) -{ - int Status = STATUS_SUCCESS; - /* Array to store GPIO numbers from EEPROM */ - UCHAR GPIO_Array[NUM_OF_LEDS+1]; - UINT uiIndex = 0; - UINT uiNum_of_LED_Type = 0; - PUCHAR puCFGData = NULL; - UCHAR bData = 0; - struct bcm_led_state_info *curr_led_state; - - memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1); - - if (!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, "Target Params not Avail.\n"); - return -ENOENT; - } - - /* Populate GPIO_Array with GPIO numbers for LED functions */ - /* Read the GPIO numbers from EEPROM */ - Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array); - if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) { - *bEnableThread = false; - return STATUS_SUCCESS; - } else if (Status) { - *bEnableThread = false; - return Status; - } - - /* - * CONFIG file read successfully. Deallocate the memory of - * uiFileNameBufferSize - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, - "LED Thread: Config file read successfully\n"); - puCFGData = (PUCHAR) &Adapter->pstargetparams->HostDrvrConfig1; - - /* - * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which - * will have the information of LED type, LED on state for different - * driver state and LED blink state. - */ - - for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { - bData = *puCFGData; - curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex]; - - /* - * Check Bit 8 for polarity. If it is set, - * polarity is reverse polarity - */ - if (bData & 0x80) { - curr_led_state->BitPolarity = 0; - /* unset the bit 8 */ - bData = bData & 0x7f; - } - - curr_led_state->LED_Type = bData; - if (bData <= NUM_OF_LEDS) - curr_led_state->GPIO_Num = GPIO_Array[bData]; - else - curr_led_state->GPIO_Num = DISABLE_GPIO_NUM; - - puCFGData++; - bData = *puCFGData; - curr_led_state->LED_On_State = bData; - puCFGData++; - bData = *puCFGData; - curr_led_state->LED_Blink_State = bData; - puCFGData++; - } - - /* - * Check if all the LED settings are disabled. If it is disabled, - * dont launch the LED control thread. 
- */ - for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { - curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex]; - - if ((curr_led_state->LED_Type == DISABLE_GPIO_NUM) || - (curr_led_state->LED_Type == 0x7f) || - (curr_led_state->LED_Type == 0)) - uiNum_of_LED_Type++; - } - if (uiNum_of_LED_Type >= NUM_OF_LEDS) - *bEnableThread = false; - - return Status; -} - -/* - * ----------------------------------------------------------------------------- - * Procedure: LedGpioInit - * - * Description: Initializes LED GPIOs. Makes the LED GPIOs to OUTPUT mode - * and make the initial state to be OFF. - * - * Arguments: - * Adapter - Pointer to MINI_ADAPTER structure. - * - * Returns: VOID - * - * ----------------------------------------------------------------------------- - */ -static VOID LedGpioInit(struct bcm_mini_adapter *Adapter) -{ - UINT uiResetValue = 0; - UINT uiIndex = 0; - struct bcm_led_state_info *curr_led_state; - - /* Set all LED GPIO Mode to output mode */ - if (rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, - sizeof(uiResetValue)) < 0) - BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, "LED Thread: RDM Failed\n"); - for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { - curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex]; - - if (curr_led_state->GPIO_Num != DISABLE_GPIO_NUM) - uiResetValue |= (1 << curr_led_state->GPIO_Num); - - TURN_OFF_LED(Adapter, 1 << curr_led_state->GPIO_Num, uiIndex); - - } - if (wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, - sizeof(uiResetValue)) < 0) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, "LED Thread: WRM Failed\n"); - - Adapter->LEDInfo.bIdle_led_off = false; -} - -static INT BcmGetGPIOPinInfo(struct bcm_mini_adapter *Adapter, - UCHAR *GPIO_num_tx, - UCHAR *GPIO_num_rx, - UCHAR *uiLedTxIndex, - UCHAR *uiLedRxIndex, - enum bcm_led_events currdriverstate) -{ - UINT uiIndex = 0; - struct bcm_led_state_info *led_state_info; - - *GPIO_num_tx = DISABLE_GPIO_NUM; - *GPIO_num_rx = DISABLE_GPIO_NUM; - - for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { - led_state_info = &Adapter->LEDInfo.LEDState[uiIndex]; - - if (((currdriverstate == NORMAL_OPERATION) || - (currdriverstate == IDLEMODE_EXIT) || - (currdriverstate == FW_DOWNLOAD)) && - (led_state_info->LED_Blink_State & currdriverstate) && - (led_state_info->GPIO_Num != DISABLE_GPIO_NUM)) { - if (*GPIO_num_tx == DISABLE_GPIO_NUM) { - *GPIO_num_tx = led_state_info->GPIO_Num; - *uiLedTxIndex = uiIndex; - } else { - *GPIO_num_rx = led_state_info->GPIO_Num; - *uiLedRxIndex = uiIndex; - } - } else { - if ((led_state_info->LED_On_State & currdriverstate) && - (led_state_info->GPIO_Num != DISABLE_GPIO_NUM)) { - *GPIO_num_tx = led_state_info->GPIO_Num; - *uiLedTxIndex = uiIndex; - } - } - } - return STATUS_SUCCESS; -} - -static void handle_adapter_driver_state(struct bcm_mini_adapter *ad, - enum bcm_led_events currdriverstate, - UCHAR GPIO_num, - UCHAR dummyGPIONum, - UCHAR uiLedIndex, - UCHAR dummyIndex, - ulong timeout, - UINT uiResetValue, - UINT uiIndex) -{ - switch (ad->DriverState) { - case DRIVER_INIT: - currdriverstate = DRIVER_INIT; - /* ad->DriverState; */ - BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum, - &uiLedIndex, &dummyIndex, - currdriverstate); - - if (GPIO_num != DISABLE_GPIO_NUM) - TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex); - - break; - case FW_DOWNLOAD: - /* - * BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, - * LED_DUMP_INFO, DBG_LVL_ALL, - * "LED Thread: FW_DN_DONE called\n"); - */ - currdriverstate = FW_DOWNLOAD; - 
BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum, - &uiLedIndex, &dummyIndex, - currdriverstate); - - if (GPIO_num != DISABLE_GPIO_NUM) { - timeout = 50; - LED_Blink(ad, 1 << GPIO_num, uiLedIndex, timeout, - -1, currdriverstate); - } - break; - case FW_DOWNLOAD_DONE: - currdriverstate = FW_DOWNLOAD_DONE; - BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum, - &uiLedIndex, &dummyIndex, currdriverstate); - if (GPIO_num != DISABLE_GPIO_NUM) - TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex); - break; - - case SHUTDOWN_EXIT: - /* - * no break, continue to NO_NETWORK_ENTRY - * state as well. - */ - case NO_NETWORK_ENTRY: - currdriverstate = NO_NETWORK_ENTRY; - BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum, - &uiLedIndex, &dummyGPIONum, currdriverstate); - if (GPIO_num != DISABLE_GPIO_NUM) - TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex); - break; - case NORMAL_OPERATION: - { - UCHAR GPIO_num_tx = DISABLE_GPIO_NUM; - UCHAR GPIO_num_rx = DISABLE_GPIO_NUM; - UCHAR uiLEDTx = 0; - UCHAR uiLEDRx = 0; - - currdriverstate = NORMAL_OPERATION; - ad->LEDInfo.bIdle_led_off = false; - - BcmGetGPIOPinInfo(ad, &GPIO_num_tx, &GPIO_num_rx, - &uiLEDTx, &uiLEDRx, currdriverstate); - if ((GPIO_num_tx == DISABLE_GPIO_NUM) && - (GPIO_num_rx == DISABLE_GPIO_NUM)) { - GPIO_num = DISABLE_GPIO_NUM; - } else { - /* - * If single LED is selected, use same - * for both Tx and Rx - */ - if (GPIO_num_tx == DISABLE_GPIO_NUM) { - GPIO_num_tx = GPIO_num_rx; - uiLEDTx = uiLEDRx; - } else if (GPIO_num_rx == DISABLE_GPIO_NUM) { - GPIO_num_rx = GPIO_num_tx; - uiLEDRx = uiLEDTx; - } - /* - * Blink the LED in proportionate - * to Tx and Rx transmissions. - */ - LED_Proportional_Blink(ad, - GPIO_num_tx, uiLEDTx, - GPIO_num_rx, uiLEDRx, - currdriverstate); - } - } - break; - case LOWPOWER_MODE_ENTER: - currdriverstate = LOWPOWER_MODE_ENTER; - if (DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING == - ad->ulPowerSaveMode) { - /* Turn OFF all the LED */ - uiResetValue = 0; - for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { - if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM) - TURN_OFF_LED(ad, - (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num), - uiIndex); - } - - } - /* Turn off LED And WAKE-UP for Sendinf IDLE mode ACK */ - ad->LEDInfo.bLedInitDone = false; - ad->LEDInfo.bIdle_led_off = TRUE; - wake_up(&ad->LEDInfo.idleModeSyncEvent); - GPIO_num = DISABLE_GPIO_NUM; - break; - case IDLEMODE_CONTINUE: - currdriverstate = IDLEMODE_CONTINUE; - GPIO_num = DISABLE_GPIO_NUM; - break; - case IDLEMODE_EXIT: - break; - case DRIVER_HALT: - currdriverstate = DRIVER_HALT; - GPIO_num = DISABLE_GPIO_NUM; - for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { - if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num != - DISABLE_GPIO_NUM) - TURN_OFF_LED(ad, - (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num), - uiIndex); - } - /* ad->DriverState = DRIVER_INIT; */ - break; - case LED_THREAD_INACTIVE: - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, "InActivating LED thread..."); - currdriverstate = LED_THREAD_INACTIVE; - ad->LEDInfo.led_thread_running = - BCM_LED_THREAD_RUNNING_INACTIVELY; - ad->LEDInfo.bLedInitDone = false; - /* disable ALL LED */ - for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { - if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num != - DISABLE_GPIO_NUM) - TURN_OFF_LED(ad, - (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num), - uiIndex); - } - break; - case LED_THREAD_ACTIVE: - BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, "Activating LED thread again..."); - if (ad->LinkUpStatus == false) - ad->DriverState = 
NO_NETWORK_ENTRY; - else - ad->DriverState = NORMAL_OPERATION; - - ad->LEDInfo.led_thread_running = - BCM_LED_THREAD_RUNNING_ACTIVELY; - break; - /* return; */ - default: - break; - } -} - -static VOID LEDControlThread(struct bcm_mini_adapter *Adapter) -{ - UINT uiIndex = 0; - UCHAR GPIO_num = 0; - UCHAR uiLedIndex = 0; - UINT uiResetValue = 0; - enum bcm_led_events currdriverstate = 0; - ulong timeout = 0; - - INT Status = 0; - - UCHAR dummyGPIONum = 0; - UCHAR dummyIndex = 0; - - /* currdriverstate = Adapter->DriverState; */ - Adapter->LEDInfo.bIdleMode_tx_from_host = false; - - /* - * Wait till event is triggered - * - * wait_event(Adapter->LEDInfo.notify_led_event, - * currdriverstate!= Adapter->DriverState); - */ - - GPIO_num = DISABLE_GPIO_NUM; - - while (TRUE) { - /* Wait till event is triggered */ - if ((GPIO_num == DISABLE_GPIO_NUM) - || - ((currdriverstate != FW_DOWNLOAD) && - (currdriverstate != NORMAL_OPERATION) && - (currdriverstate != LOWPOWER_MODE_ENTER)) - || - (currdriverstate == LED_THREAD_INACTIVE)) - Status = wait_event_interruptible( - Adapter->LEDInfo.notify_led_event, - currdriverstate != Adapter->DriverState - || kthread_should_stop()); - - if (kthread_should_stop() || Adapter->device_removed) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, - "Led thread got signal to exit..hence exiting"); - Adapter->LEDInfo.led_thread_running = - BCM_LED_THREAD_DISABLED; - TURN_OFF_LED(Adapter, 1 << GPIO_num, uiLedIndex); - return; /* STATUS_FAILURE; */ - } - - if (GPIO_num != DISABLE_GPIO_NUM) - TURN_OFF_LED(Adapter, 1 << GPIO_num, uiLedIndex); - - if (Adapter->LEDInfo.bLedInitDone == false) { - LedGpioInit(Adapter); - Adapter->LEDInfo.bLedInitDone = TRUE; - } - - handle_adapter_driver_state(Adapter, - currdriverstate, - GPIO_num, - dummyGPIONum, - uiLedIndex, - dummyIndex, - timeout, - uiResetValue, - uiIndex - ); - } - Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED; -} - -int InitLedSettings(struct bcm_mini_adapter *Adapter) -{ - int Status = STATUS_SUCCESS; - bool bEnableThread = TRUE; - UCHAR uiIndex = 0; - - /* - * Initially set BitPolarity to normal polarity. The bit 8 of LED type - * is used to change the polarity of the LED. 
- */ - - for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) - Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 1; - - /* - * Read the LED settings of CONFIG file and map it - * to GPIO numbers in EEPROM - */ - Status = ReadConfigFileStructure(Adapter, &bEnableThread); - if (STATUS_SUCCESS != Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, - "LED Thread: FAILED in ReadConfigFileStructure\n"); - return Status; - } - - if (Adapter->LEDInfo.led_thread_running) { - if (bEnableThread) { - ; - } else { - Adapter->DriverState = DRIVER_HALT; - wake_up(&Adapter->LEDInfo.notify_led_event); - Adapter->LEDInfo.led_thread_running = - BCM_LED_THREAD_DISABLED; - } - - } else if (bEnableThread) { - /* Create secondary thread to handle the LEDs */ - init_waitqueue_head(&Adapter->LEDInfo.notify_led_event); - init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent); - Adapter->LEDInfo.led_thread_running = - BCM_LED_THREAD_RUNNING_ACTIVELY; - Adapter->LEDInfo.bIdle_led_off = false; - Adapter->LEDInfo.led_cntrl_threadid = - kthread_run((int (*)(void *)) LEDControlThread, - Adapter, "led_control_thread"); - if (IS_ERR(Adapter->LEDInfo.led_cntrl_threadid)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, - DBG_LVL_ALL, - "Not able to spawn Kernel Thread\n"); - Adapter->LEDInfo.led_thread_running = - BCM_LED_THREAD_DISABLED; - return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid); - } - } - return Status; -} diff --git a/drivers/staging/bcm/led_control.h b/drivers/staging/bcm/led_control.h deleted file mode 100644 index 1b24bf4658af..000000000000 --- a/drivers/staging/bcm/led_control.h +++ /dev/null @@ -1,84 +0,0 @@ -#ifndef _LED_CONTROL_H -#define _LED_CONTROL_H - -#define NUM_OF_LEDS 4 -#define DSD_START_OFFSET 0x0200 -#define EEPROM_VERSION_OFFSET 0x020E -#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218 -#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220 -#define GPIO_SECTION_START_OFFSET 0x03 -#define COMPATIBILITY_SECTION_LENGTH 42 -#define COMPATIBILITY_SECTION_LENGTH_MAP5 84 -#define EEPROM_MAP5_MAJORVERSION 5 -#define EEPROM_MAP5_MINORVERSION 0 -#define MAX_NUM_OF_BLINKS 10 -#define NUM_OF_GPIO_PINS 16 -#define DISABLE_GPIO_NUM 0xFF -#define EVENT_SIGNALED 1 -#define MAX_FILE_NAME_BUFFER_SIZE 100 - -#define TURN_ON_LED(ad, GPIO, index) do { \ - unsigned int gpio_val = GPIO; \ - (ad->LEDInfo.LEDState[index].BitPolarity == 1) ? \ - wrmaltWithLock(ad, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \ - wrmaltWithLock(ad, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \ - } while (0) - -#define TURN_OFF_LED(ad, GPIO, index) do { \ - unsigned int gpio_val = GPIO; \ - (ad->LEDInfo.LEDState[index].BitPolarity == 1) ? \ - wrmaltWithLock(ad, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \ - wrmaltWithLock(ad, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \ - } while (0) - -enum bcm_led_colors { - RED_LED = 1, - BLUE_LED = 2, - YELLOW_LED = 3, - GREEN_LED = 4 -}; - -enum bcm_led_events { - SHUTDOWN_EXIT = 0x00, - DRIVER_INIT = 0x1, - FW_DOWNLOAD = 0x2, - FW_DOWNLOAD_DONE = 0x4, - NO_NETWORK_ENTRY = 0x8, - NORMAL_OPERATION = 0x10, - LOWPOWER_MODE_ENTER = 0x20, - IDLEMODE_CONTINUE = 0x40, - IDLEMODE_EXIT = 0x80, - LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread Inactivce. It wil be equivallent to putting the thread on hold. */ - LED_THREAD_ACTIVE = 0x200, /* Makes the LED Thread Active back. 
*/ - DRIVER_HALT = 0xff -}; /* Enumerated values of different driver states */ - -/* - * Structure which stores the information of different LED types - * and corresponding LED state information of driver states - */ -struct bcm_led_state_info { - unsigned char LED_Type; /* specify GPIO number - use 0xFF if not used */ - unsigned char LED_On_State; /* Bits set or reset for different states */ - unsigned char LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */ - unsigned char GPIO_Num; - unsigned char BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */ -}; - -struct bcm_led_info { - struct bcm_led_state_info LEDState[NUM_OF_LEDS]; - bool bIdleMode_tx_from_host; /* Variable to notify whether driver came out from idlemode due to Host or target */ - bool bIdle_led_off; - wait_queue_head_t notify_led_event; - wait_queue_head_t idleModeSyncEvent; - struct task_struct *led_cntrl_threadid; - int led_thread_running; - bool bLedInitDone; -}; - -/* LED Thread state. */ -#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */ -#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */ -#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /* LED thread has been put on hold */ - -#endif diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c deleted file mode 100644 index ce09473fbb1f..000000000000 --- a/drivers/staging/bcm/nvm.c +++ /dev/null @@ -1,4661 +0,0 @@ -#include "headers.h" - -#define DWORD unsigned int - -static int BcmDoChipSelect(struct bcm_mini_adapter *Adapter, - unsigned int offset); -static int BcmGetActiveDSD(struct bcm_mini_adapter *Adapter); -static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter); -static unsigned int BcmGetEEPROMSize(struct bcm_mini_adapter *Adapter); -static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter); -static unsigned int BcmGetFlashSectorSize(struct bcm_mini_adapter *Adapter, - unsigned int FlashSectorSizeSig, - unsigned int FlashSectorSize); - -static VOID BcmValidateNvmType(struct bcm_mini_adapter *Adapter); -static int BcmGetNvmSize(struct bcm_mini_adapter *Adapter); -static unsigned int BcmGetFlashSize(struct bcm_mini_adapter *Adapter); -static enum bcm_nvm_type BcmGetNvmType(struct bcm_mini_adapter *Adapter); - -static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val eFlash2xSectionVal); - -static B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, - unsigned int uiOffset); -static int IsSectionWritable(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val Section); -static int IsSectionExistInVendorInfo(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val section); - -static int ReadDSDPriority(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val dsd); -static int ReadDSDSignature(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val dsd); -static int ReadISOPriority(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val iso); -static int ReadISOSignature(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val iso); - -static int CorruptDSDSig(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val eFlash2xSectionVal); -static int CorruptISOSig(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val eFlash2xSectionVal); -static int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, - PUCHAR pBuff, - unsigned int uiSectAlignAddr); -static int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter, - PUINT pBuff, 
- enum bcm_flash2x_section_val eFlash2xSectionVal, - unsigned int uiOffset, - unsigned int uiNumBytes); -static enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter); -static enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter); - -static int BeceemFlashBulkRead( - struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes); - -static int BeceemFlashBulkWrite( - struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes, - bool bVerify); - -static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter); - -static int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter, unsigned int dwAddress, unsigned int *pdwData, unsigned int dwNumData); - -/* Procedure: ReadEEPROMStatusRegister - * - * Description: Reads the standard EEPROM Status Register. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * Returns: - * OSAL_STATUS_CODE - */ -static UCHAR ReadEEPROMStatusRegister(struct bcm_mini_adapter *Adapter) -{ - UCHAR uiData = 0; - DWORD dwRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY; - unsigned int uiStatus = 0; - unsigned int value = 0; - unsigned int value1 = 0; - - /* Read the EEPROM status register */ - value = EEPROM_READ_STATUS_REGISTER; - wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value)); - - while (dwRetries != 0) { - value = 0; - uiStatus = 0; - rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus)); - if (Adapter->device_removed == TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem has got removed hence exiting...."); - break; - } - - /* Wait for Avail bit to be set. */ - if ((uiStatus & EEPROM_READ_DATA_AVAIL) != 0) { - /* Clear the Avail/Full bits - which ever is set. */ - value = uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL); - wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - - value = 0; - rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value)); - uiData = (UCHAR)value; - - break; - } - - dwRetries--; - if (dwRetries == 0) { - rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x3004 = %x 0x3008 = %x, retries = %d failed.\n", value, value1, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY); - return uiData; - } - if (!(dwRetries%RETRIES_PER_DELAY)) - udelay(1000); - uiStatus = 0; - } - return uiData; -} /* ReadEEPROMStatusRegister */ - -/* - * Procedure: ReadBeceemEEPROMBulk - * - * Description: This routine reads 16Byte data from EEPROM - * - * Arguments: - * Adapter - ptr to Adapter object instance - * dwAddress - EEPROM Offset to read the data from. - * pdwData - Pointer to double word where data needs to be stored in. // dwNumWords - Number of words. Valid values are 4 ONLY. - * - * Returns: - * OSAL_STATUS_CODE: - */ - -static int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter, - DWORD dwAddress, - DWORD *pdwData, - DWORD dwNumWords) -{ - DWORD dwIndex = 0; - DWORD dwRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY; - unsigned int uiStatus = 0; - unsigned int value = 0; - unsigned int value1 = 0; - UCHAR *pvalue; - - /* Flush the read and cmd queue. */ - value = (EEPROM_READ_QUEUE_FLUSH | EEPROM_CMD_QUEUE_FLUSH); - wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value)); - value = 0; - wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value)); - - /* Clear the Avail/Full bits. 
*/ - value = (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL); - wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - - value = dwAddress | ((dwNumWords == 4) ? EEPROM_16_BYTE_PAGE_READ : EEPROM_4_BYTE_PAGE_READ); - wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value)); - - while (dwRetries != 0) { - uiStatus = 0; - rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus)); - if (Adapter->device_removed == TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem has got Removed.hence exiting from loop..."); - return -ENODEV; - } - - /* If we are reading 16 bytes we want to be sure that the queue - * is full before we read. In the other cases we are ok if the - * queue has data available - */ - if (dwNumWords == 4) { - if ((uiStatus & EEPROM_READ_DATA_FULL) != 0) { - /* Clear the Avail/Full bits - which ever is set. */ - value = (uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL)); - wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - break; - } - } else if (dwNumWords == 1) { - if ((uiStatus & EEPROM_READ_DATA_AVAIL) != 0) { - /* We just got Avail and we have to read 32bits so we - * need this sleep for Cardbus kind of devices. - */ - if (Adapter->chip_id == 0xBECE0210) - udelay(800); - - /* Clear the Avail/Full bits - which ever is set. */ - value = (uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL)); - wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - break; - } - } - - uiStatus = 0; - - dwRetries--; - if (dwRetries == 0) { - value = 0; - value1 = 0; - rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "dwNumWords %d 0x3004 = %x 0x3008 = %x retries = %d failed.\n", - dwNumWords, value, value1, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY); - return STATUS_FAILURE; - } - - if (!(dwRetries%RETRIES_PER_DELAY)) - udelay(1000); - } - - for (dwIndex = 0; dwIndex < dwNumWords; dwIndex++) { - /* We get only a byte at a time - from LSB to MSB. We shift it into an integer. */ - pvalue = (PUCHAR)(pdwData + dwIndex); - - value = 0; - rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value)); - - pvalue[0] = value; - - value = 0; - rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value)); - - pvalue[1] = value; - - value = 0; - rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value)); - - pvalue[2] = value; - - value = 0; - rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value)); - - pvalue[3] = value; - } - - return STATUS_SUCCESS; -} /* ReadBeceemEEPROMBulk() */ - -/* - * Procedure: ReadBeceemEEPROM - * - * Description: This routine reads 4 data from EEPROM. It uses 1 or 2 page - * reads to do this operation. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * uiOffset - EEPROM Offset to read the data from. - * pBuffer - Pointer to word where data needs to be stored in. - * - * Returns: - * OSAL_STATUS_CODE: - */ - -int ReadBeceemEEPROM(struct bcm_mini_adapter *Adapter, - DWORD uiOffset, - DWORD *pBuffer) -{ - unsigned int uiData[8] = {0}; - unsigned int uiByteOffset = 0; - unsigned int uiTempOffset = 0; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " ====> "); - - uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE); - uiByteOffset = uiOffset - uiTempOffset; - - ReadBeceemEEPROMBulk(Adapter, uiTempOffset, (PUINT)&uiData[0], 4); - - /* A word can overlap at most over 2 pages. In that case we read the - * next page too. 
- */ - if (uiByteOffset > 12) - ReadBeceemEEPROMBulk(Adapter, uiTempOffset + MAX_RW_SIZE, (PUINT)&uiData[4], 4); - - memcpy((PUCHAR)pBuffer, (((PUCHAR)&uiData[0]) + uiByteOffset), 4); - - return STATUS_SUCCESS; -} /* ReadBeceemEEPROM() */ - -int ReadMacAddressFromNVM(struct bcm_mini_adapter *Adapter) -{ - int Status; - unsigned char puMacAddr[6]; - - Status = BeceemNVMRead(Adapter, - (PUINT)&puMacAddr[0], - INIT_PARAMS_1_MACADDRESS_ADDRESS, - MAC_ADDRESS_SIZE); - - if (Status == STATUS_SUCCESS) - memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE); - - return Status; -} - -/* - * Procedure: BeceemEEPROMBulkRead - * - * Description: Reads the EEPROM and returns the Data. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * pBuffer - Buffer to store the data read from EEPROM - * uiOffset - Offset of EEPROM from where data should be read - * uiNumBytes - Number of bytes to be read from the EEPROM. - * - * Returns: - * OSAL_STATUS_SUCCESS - if EEPROM read is successful. - * - if failed. - */ - -int BeceemEEPROMBulkRead(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes) -{ - unsigned int uiData[4] = {0}; - /* unsigned int uiAddress = 0; */ - unsigned int uiBytesRemaining = uiNumBytes; - unsigned int uiIndex = 0; - unsigned int uiTempOffset = 0; - unsigned int uiExtraBytes = 0; - unsigned int uiFailureRetries = 0; - PUCHAR pcBuff = (PUCHAR)pBuffer; - - if (uiOffset % MAX_RW_SIZE && uiBytesRemaining) { - uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE); - uiExtraBytes = uiOffset - uiTempOffset; - ReadBeceemEEPROMBulk(Adapter, uiTempOffset, (PUINT)&uiData[0], 4); - if (uiBytesRemaining >= (MAX_RW_SIZE - uiExtraBytes)) { - memcpy(pBuffer, (((PUCHAR)&uiData[0]) + uiExtraBytes), MAX_RW_SIZE - uiExtraBytes); - uiBytesRemaining -= (MAX_RW_SIZE - uiExtraBytes); - uiIndex += (MAX_RW_SIZE - uiExtraBytes); - uiOffset += (MAX_RW_SIZE - uiExtraBytes); - } else { - memcpy(pBuffer, (((PUCHAR)&uiData[0]) + uiExtraBytes), uiBytesRemaining); - uiIndex += uiBytesRemaining; - uiOffset += uiBytesRemaining; - uiBytesRemaining = 0; - } - } - - while (uiBytesRemaining && uiFailureRetries != 128) { - if (Adapter->device_removed) - return -1; - - if (uiBytesRemaining >= MAX_RW_SIZE) { - /* For the requests more than or equal to 16 bytes, use bulk - * read function to make the access faster. - * We read 4 Dwords of data - */ - if (ReadBeceemEEPROMBulk(Adapter, uiOffset, &uiData[0], 4) == 0) { - memcpy(pcBuff + uiIndex, &uiData[0], MAX_RW_SIZE); - uiOffset += MAX_RW_SIZE; - uiBytesRemaining -= MAX_RW_SIZE; - uiIndex += MAX_RW_SIZE; - } else { - uiFailureRetries++; - mdelay(3); /* sleep for a while before retry... */ - } - } else if (uiBytesRemaining >= 4) { - if (ReadBeceemEEPROM(Adapter, uiOffset, &uiData[0]) == 0) { - memcpy(pcBuff + uiIndex, &uiData[0], 4); - uiOffset += 4; - uiBytesRemaining -= 4; - uiIndex += 4; - } else { - uiFailureRetries++; - mdelay(3); /* sleep for a while before retry... */ - } - } else { - /* Handle the reads less than 4 bytes... */ - PUCHAR pCharBuff = (PUCHAR)pBuffer; - - pCharBuff += uiIndex; - if (ReadBeceemEEPROM(Adapter, uiOffset, &uiData[0]) == 0) { - memcpy(pCharBuff, &uiData[0], uiBytesRemaining); /* copy only bytes requested. */ - uiBytesRemaining = 0; - } else { - uiFailureRetries++; - mdelay(3); /* sleep for a while before retry... */ - } - } - } - - return 0; -} - -/* - * Procedure: BeceemFlashBulkRead - * - * Description: Reads the FLASH and returns the Data. 
- * - * Arguments: - * Adapter - ptr to Adapter object instance - * pBuffer - Buffer to store the data read from FLASH - * uiOffset - Offset of FLASH from where data should be read - * uiNumBytes - Number of bytes to be read from the FLASH. - * - * Returns: - * OSAL_STATUS_SUCCESS - if FLASH read is successful. - * - if failed. - */ - -static int BeceemFlashBulkRead(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes) -{ - unsigned int uiIndex = 0; - unsigned int uiBytesToRead = uiNumBytes; - int Status = 0; - unsigned int uiPartOffset = 0; - int bytes; - - if (Adapter->device_removed) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device Got Removed"); - return -ENODEV; - } - - /* Adding flash Base address - * uiOffset = uiOffset + GetFlashBaseAddr(Adapter); - */ - #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS) - Status = bcmflash_raw_read((uiOffset/FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes); - return Status; - #endif - - Adapter->SelectedChip = RESET_CHIP_SELECT; - - if (uiOffset % MAX_RW_SIZE) { - BcmDoChipSelect(Adapter, uiOffset); - uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter); - - uiBytesToRead = MAX_RW_SIZE - (uiOffset % MAX_RW_SIZE); - uiBytesToRead = MIN(uiNumBytes, uiBytesToRead); - - bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer + uiIndex, uiBytesToRead); - if (bytes < 0) { - Status = bytes; - Adapter->SelectedChip = RESET_CHIP_SELECT; - return Status; - } - - uiIndex += uiBytesToRead; - uiOffset += uiBytesToRead; - uiNumBytes -= uiBytesToRead; - } - - while (uiNumBytes) { - BcmDoChipSelect(Adapter, uiOffset); - uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter); - - uiBytesToRead = MIN(uiNumBytes, MAX_RW_SIZE); - - bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer + uiIndex, uiBytesToRead); - if (bytes < 0) { - Status = bytes; - break; - } - - uiIndex += uiBytesToRead; - uiOffset += uiBytesToRead; - uiNumBytes -= uiBytesToRead; - } - Adapter->SelectedChip = RESET_CHIP_SELECT; - return Status; -} - -/* - * Procedure: BcmGetFlashSize - * - * Description: Finds the size of FLASH. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * unsigned int - size of the FLASH Storage. - * - */ - -static unsigned int BcmGetFlashSize(struct bcm_mini_adapter *Adapter) -{ - if (IsFlash2x(Adapter)) - return Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header); - else - return 32 * 1024; -} - -/* - * Procedure: BcmGetEEPROMSize - * - * Description: Finds the size of EEPROM. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * unsigned int - size of the EEPROM Storage. - * - */ - -static unsigned int BcmGetEEPROMSize(struct bcm_mini_adapter *Adapter) -{ - unsigned int uiData = 0; - unsigned int uiIndex = 0; - - /* - * if EEPROM is present and already Calibrated,it will have - * 'BECM' string at 0th offset. - * To find the EEPROM size read the possible boundaries of the - * EEPROM like 4K,8K etc..accessing the EEPROM beyond its size will - * result in wrap around. So when we get the End of the EEPROM we will - * get 'BECM' string which is indeed at offset 0. 
- */ - BeceemEEPROMBulkRead(Adapter, &uiData, 0x0, 4); - if (uiData == BECM) { - for (uiIndex = 2; uiIndex <= 256; uiIndex *= 2) { - BeceemEEPROMBulkRead(Adapter, &uiData, uiIndex * 1024, 4); - if (uiData == BECM) - return uiIndex * 1024; - } - } else { - /* - * EEPROM may not be present or not programmed - */ - uiData = 0xBABEFACE; - if (BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&uiData, 0, 4, TRUE) == 0) { - uiData = 0; - for (uiIndex = 2; uiIndex <= 256; uiIndex *= 2) { - BeceemEEPROMBulkRead(Adapter, &uiData, uiIndex * 1024, 4); - if (uiData == 0xBABEFACE) - return uiIndex * 1024; - } - } - } - return 0; -} - -/* - * Procedure: FlashSectorErase - * - * Description: Finds the sector size of the FLASH. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * addr - sector start address - * numOfSectors - number of sectors to be erased. - * - * Returns: - * OSAL_STATUS_CODE - * - */ - -static int FlashSectorErase(struct bcm_mini_adapter *Adapter, - unsigned int addr, - unsigned int numOfSectors) -{ - unsigned int iIndex = 0, iRetries = 0; - unsigned int uiStatus = 0; - unsigned int value; - int bytes; - - for (iIndex = 0; iIndex < numOfSectors; iIndex++) { - value = 0x06000000; - wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)); - - value = (0xd8000000 | (addr & 0xFFFFFF)); - wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)); - iRetries = 0; - - do { - value = (FLASH_CMD_STATUS_REG_READ << 24); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - - bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)); - if (bytes < 0) { - uiStatus = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails"); - return uiStatus; - } - iRetries++; - /* After every try lets make the CPU free for 10 ms. generally time taken by the - * the sector erase cycle is 500 ms to 40000 msec. hence sleeping 10 ms - * won't hamper performance in any case. - */ - mdelay(10); - } while ((uiStatus & 0x1) && (iRetries < 400)); - - if (uiStatus & 0x1) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "iRetries crossing the limit of 80000\n"); - return STATUS_FAILURE; - } - - addr += Adapter->uiSectorSize; - } - return 0; -} -/* - * Procedure: flashByteWrite - * - * Description: Performs Byte by Byte write to flash - * - * Arguments: - * Adapter - ptr to Adapter object instance - * uiOffset - Offset of the flash where data needs to be written to. - * pData - Address of Data to be written. - * Returns: - * OSAL_STATUS_CODE - * - */ - -static int flashByteWrite(struct bcm_mini_adapter *Adapter, - unsigned int uiOffset, - PVOID pData) -{ - unsigned int uiStatus = 0; - int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */ - unsigned int value; - ULONG ulData = *(PUCHAR)pData; - int bytes; - /* - * need not write 0xFF because write requires an erase and erase will - * make whole sector 0xFF. 
- */ - - if (0xFF == ulData) - return STATUS_SUCCESS; - - /* DumpDebug(NVM_RW,("flashWrite ====>\n")); */ - value = (FLASH_CMD_WRITE_ENABLE << 24); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write enable in FLASH_SPI_CMDQ_REG register fails"); - return STATUS_FAILURE; - } - - if (wrm(Adapter, FLASH_SPI_WRITEQ_REG, (PCHAR)&ulData, 4) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DATA Write on FLASH_SPI_WRITEQ_REG fails"); - return STATUS_FAILURE; - } - value = (0x02000000 | (uiOffset & 0xFFFFFF)); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programming of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - - /* __udelay(950); */ - - do { - value = (FLASH_CMD_STATUS_REG_READ << 24); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - /* __udelay(1); */ - bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)); - if (bytes < 0) { - uiStatus = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails"); - return uiStatus; - } - iRetries--; - if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0)) - udelay(1000); - - } while ((uiStatus & 0x1) && (iRetries > 0)); - - if (uiStatus & 0x1) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times."); - return STATUS_FAILURE; - } - - return STATUS_SUCCESS; -} - -/* - * Procedure: flashWrite - * - * Description: Performs write to flash - * - * Arguments: - * Adapter - ptr to Adapter object instance - * uiOffset - Offset of the flash where data needs to be written to. - * pData - Address of Data to be written. - * Returns: - * OSAL_STATUS_CODE - * - */ - -static int flashWrite(struct bcm_mini_adapter *Adapter, - unsigned int uiOffset, - PVOID pData) -{ - /* unsigned int uiStatus = 0; - * int iRetries = 0; - * unsigned int uiReadBack = 0; - */ - unsigned int uiStatus = 0; - int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */ - unsigned int value; - unsigned int uiErasePattern[4] = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; - int bytes; - /* - * need not write 0xFFFFFFFF because write requires an erase and erase will - * make whole sector 0xFFFFFFFF. 
- */ - if (!memcmp(pData, uiErasePattern, MAX_RW_SIZE)) - return 0; - - value = (FLASH_CMD_WRITE_ENABLE << 24); - - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write Enable of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - - if (wrm(Adapter, uiOffset, (PCHAR)pData, MAX_RW_SIZE) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Data write fails..."); - return STATUS_FAILURE; - } - - /* __udelay(950); */ - do { - value = (FLASH_CMD_STATUS_REG_READ << 24); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - /* __udelay(1); */ - bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)); - if (bytes < 0) { - uiStatus = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails"); - return uiStatus; - } - - iRetries--; - /* this will ensure that in there will be no changes in the current path. - * currently one rdm/wrm takes 125 us. - * Hence 125 *2 * FLASH_PER_RETRIES_DELAY > 3 ms(worst case delay) - * Hence current implementation cycle will intoduce no delay in current path - */ - if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0)) - udelay(1000); - } while ((uiStatus & 0x1) && (iRetries > 0)); - - if (uiStatus & 0x1) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times."); - return STATUS_FAILURE; - } - - return STATUS_SUCCESS; -} - -/*----------------------------------------------------------------------------- - * Procedure: flashByteWriteStatus - * - * Description: Performs byte by byte write to flash with write done status check - * - * Arguments: - * Adapter - ptr to Adapter object instance - * uiOffset - Offset of the flash where data needs to be written to. - * pData - Address of the Data to be written. - * Returns: - * OSAL_STATUS_CODE - * - */ -static int flashByteWriteStatus(struct bcm_mini_adapter *Adapter, - unsigned int uiOffset, - PVOID pData) -{ - unsigned int uiStatus = 0; - int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */ - ULONG ulData = *(PUCHAR)pData; - unsigned int value; - int bytes; - - /* - * need not write 0xFFFFFFFF because write requires an erase and erase will - * make whole sector 0xFFFFFFFF. 
- */ - - if (0xFF == ulData) - return STATUS_SUCCESS; - - /* DumpDebug(NVM_RW,("flashWrite ====>\n")); */ - - value = (FLASH_CMD_WRITE_ENABLE << 24); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write enable in FLASH_SPI_CMDQ_REG register fails"); - return STATUS_FAILURE; - } - if (wrm(Adapter, FLASH_SPI_WRITEQ_REG, (PCHAR)&ulData, 4) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DATA Write on FLASH_SPI_WRITEQ_REG fails"); - return STATUS_FAILURE; - } - value = (0x02000000 | (uiOffset & 0xFFFFFF)); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programming of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - - /* msleep(1); */ - - do { - value = (FLASH_CMD_STATUS_REG_READ << 24); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programming of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - /* __udelay(1); */ - bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)); - if (bytes < 0) { - uiStatus = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails"); - return uiStatus; - } - - iRetries--; - if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0)) - udelay(1000); - - } while ((uiStatus & 0x1) && (iRetries > 0)); - - if (uiStatus & 0x1) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times."); - return STATUS_FAILURE; - } - - return STATUS_SUCCESS; -} -/* - * Procedure: flashWriteStatus - * - * Description: Performs write to flash with write done status check - * - * Arguments: - * Adapter - ptr to Adapter object instance - * uiOffset - Offset of the flash where data needs to be written to. - * pData - Address of the Data to be written. - * Returns: - * OSAL_STATUS_CODE - * - */ - -static int flashWriteStatus(struct bcm_mini_adapter *Adapter, - unsigned int uiOffset, - PVOID pData) -{ - unsigned int uiStatus = 0; - int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */ - /* unsigned int uiReadBack = 0; */ - unsigned int value; - unsigned int uiErasePattern[4] = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; - int bytes; - - /* - * need not write 0xFFFFFFFF because write requires an erase and erase will - * make whole sector 0xFFFFFFFF.
- */ - if (!memcmp(pData, uiErasePattern, MAX_RW_SIZE)) - return 0; - - value = (FLASH_CMD_WRITE_ENABLE << 24); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write Enable of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - - if (wrm(Adapter, uiOffset, (PCHAR)pData, MAX_RW_SIZE) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Data write fails..."); - return STATUS_FAILURE; - } - /* __udelay(1); */ - - do { - value = (FLASH_CMD_STATUS_REG_READ << 24); - if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails"); - return STATUS_FAILURE; - } - /* __udelay(1); */ - bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)); - if (bytes < 0) { - uiStatus = bytes; - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails"); - return uiStatus; - } - iRetries--; - /* this will ensure that in there will be no changes in the current path. - * currently one rdm/wrm takes 125 us. - * Hence 125 *2 * FLASH_PER_RETRIES_DELAY >3 ms(worst case delay) - * Hence current implementation cycle will intoduce no delay in current path - */ - if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0)) - udelay(1000); - - } while ((uiStatus & 0x1) && (iRetries > 0)); - - if (uiStatus & 0x1) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times."); - return STATUS_FAILURE; - } - - return STATUS_SUCCESS; -} - -/* - * Procedure: BcmRestoreBlockProtectStatus - * - * Description: Restores the original block protection status. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * ulWriteStatus -Original status - * Returns: - * - * - */ - -static VOID BcmRestoreBlockProtectStatus(struct bcm_mini_adapter *Adapter, ULONG ulWriteStatus) -{ - unsigned int value; - - value = (FLASH_CMD_WRITE_ENABLE << 24); - wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)); - - udelay(20); - value = (FLASH_CMD_STATUS_REG_WRITE << 24) | (ulWriteStatus << 16); - wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)); - udelay(20); -} - -/* - * Procedure: BcmFlashUnProtectBlock - * - * Description: UnProtects appropriate blocks for writing. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * uiOffset - Offset of the flash where data needs to be written to. This should be Sector aligned. - * Returns: - * ULONG - Status value before UnProtect. - * - */ - -static ULONG BcmFlashUnProtectBlock(struct bcm_mini_adapter *Adapter, unsigned int uiOffset, unsigned int uiLength) -{ - ULONG ulStatus = 0; - ULONG ulWriteStatus = 0; - unsigned int value; - - uiOffset = uiOffset&0x000FFFFF; - /* - * Implemented only for 1MB Flash parts. - */ - if (FLASH_PART_SST25VF080B == Adapter->ulFlashID) { - /* - * Get Current BP status. - */ - value = (FLASH_CMD_STATUS_REG_READ << 24); - wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)); - udelay(10); - /* - * Read status will be WWXXYYZZ. We have to take only WW. - */ - rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulStatus, sizeof(ulStatus)); - ulStatus >>= 24; - ulWriteStatus = ulStatus; - /* - * Bits [5-2] give current block level protection status. - * Bit5: BP3 - DONT CARE - * BP2-BP0: 0 - NO PROTECTION, 1 - UPPER 1/16, 2 - UPPER 1/8, 3 - UPPER 1/4 - * 4 - UPPER 1/2. 
5 to 7 - ALL BLOCKS - */ - - if (ulStatus) { - if ((uiOffset+uiLength) <= 0x80000) { - /* - * Offset comes in lower half of 1MB. Protect the upper half. - * Clear BP1 and BP0 and set BP2. - */ - ulWriteStatus |= (0x4<<2); - ulWriteStatus &= ~(0x3<<2); - } else if ((uiOffset + uiLength) <= 0xC0000) { - /* - * Offset comes below Upper 1/4. Upper 1/4 can be protected. - * Clear BP2 and set BP1 and BP0. - */ - ulWriteStatus |= (0x3<<2); - ulWriteStatus &= ~(0x1<<4); - } else if ((uiOffset + uiLength) <= 0xE0000) { - /* - * Offset comes below Upper 1/8. Upper 1/8 can be protected. - * Clear BP2 and BP0 and set BP1 - */ - ulWriteStatus |= (0x1<<3); - ulWriteStatus &= ~(0x5<<2); - } else if ((uiOffset + uiLength) <= 0xF0000) { - /* - * Offset comes below Upper 1/16. Only upper 1/16 can be protected. - * Set BP0 and Clear BP2,BP1. - */ - ulWriteStatus |= (0x1<<2); - ulWriteStatus &= ~(0x3<<3); - } else { - /* - * Unblock all. - * Clear BP2,BP1 and BP0. - */ - ulWriteStatus &= ~(0x7<<2); - } - - value = (FLASH_CMD_WRITE_ENABLE << 24); - wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)); - udelay(20); - value = (FLASH_CMD_STATUS_REG_WRITE << 24) | (ulWriteStatus << 16); - wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)); - udelay(20); - } - } - return ulStatus; -} - -static int bulk_read_complete_sector(struct bcm_mini_adapter *ad, - UCHAR read_bk[], - PCHAR tmpbuff, - unsigned int offset, - unsigned int partoff) -{ - unsigned int i; - int j; - int bulk_read_stat; - FP_FLASH_WRITE_STATUS writef = - ad->fpFlashWriteWithStatusCheck; - - for (i = 0; i < ad->uiSectorSize; i += MAX_RW_SIZE) { - bulk_read_stat = BeceemFlashBulkRead(ad, - (PUINT)read_bk, - offset + i, - MAX_RW_SIZE); - - if (bulk_read_stat != STATUS_SUCCESS) - continue; - - if (ad->ulFlashWriteSize == 1) { - for (j = 0; j < 16; j++) { - if ((read_bk[j] != tmpbuff[i + j]) && - (STATUS_SUCCESS != (*writef)(ad, partoff + i + j, &tmpbuff[i + j]))) { - return STATUS_FAILURE; - } - } - } else { - if ((memcmp(read_bk, &tmpbuff[i], MAX_RW_SIZE)) && - (STATUS_SUCCESS != (*writef)(ad, partoff + i, &tmpbuff[i]))) { - return STATUS_FAILURE; - } - } - } - - return STATUS_SUCCESS; -} - -/* - * Procedure: BeceemFlashBulkWrite - * - * Description: Performs write to the flash - * - * Arguments: - * Adapter - ptr to Adapter object instance - * pBuffer - Data to be written. - * uiOffset - Offset of the flash where data needs to be written to. - * uiNumBytes - Number of bytes to be written. - * bVerify - read verify flag. 
- * Returns: - * OSAL_STATUS_CODE - * - */ - -static int BeceemFlashBulkWrite(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes, - bool bVerify) -{ - PCHAR pTempBuff = NULL; - PUCHAR pcBuffer = (PUCHAR)pBuffer; - unsigned int uiIndex = 0; - unsigned int uiOffsetFromSectStart = 0; - unsigned int uiSectAlignAddr = 0; - unsigned int uiCurrSectOffsetAddr = 0; - unsigned int uiSectBoundary = 0; - unsigned int uiNumSectTobeRead = 0; - UCHAR ucReadBk[16] = {0}; - ULONG ulStatus = 0; - int Status = STATUS_SUCCESS; - unsigned int uiTemp = 0; - unsigned int index = 0; - unsigned int uiPartOffset = 0; - - #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS) - Status = bcmflash_raw_write((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes); - return Status; - #endif - - uiOffsetFromSectStart = uiOffset & ~(Adapter->uiSectorSize - 1); - - /* Adding flash Base address - * uiOffset = uiOffset + GetFlashBaseAddr(Adapter); - */ - - uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1); - uiCurrSectOffsetAddr = uiOffset & (Adapter->uiSectorSize - 1); - uiSectBoundary = uiSectAlignAddr + Adapter->uiSectorSize; - - pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL); - if (!pTempBuff) - goto BeceemFlashBulkWrite_EXIT; - /* - * check if the data to be written is overlapped across sectors - */ - if (uiOffset+uiNumBytes < uiSectBoundary) { - uiNumSectTobeRead = 1; - } else { - /* Number of sectors = Last sector start address/First sector start address */ - uiNumSectTobeRead = (uiCurrSectOffsetAddr + uiNumBytes) / Adapter->uiSectorSize; - if ((uiCurrSectOffsetAddr + uiNumBytes)%Adapter->uiSectorSize) - uiNumSectTobeRead++; - } - /* Check whether Requested sector is writable or not in case of flash2x write. 
But if write call is - * for DSD calibration, allow it without checking of sector permission - */ - - if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == false)) { - index = 0; - uiTemp = uiNumSectTobeRead; - while (uiTemp) { - if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Sector Starting at offset <0X%X> is not writable", - (uiOffsetFromSectStart + index * Adapter->uiSectorSize)); - Status = SECTOR_IS_NOT_WRITABLE; - goto BeceemFlashBulkWrite_EXIT; - } - uiTemp = uiTemp - 1; - index = index + 1; - } - } - Adapter->SelectedChip = RESET_CHIP_SELECT; - while (uiNumSectTobeRead) { - /* do_gettimeofday(&tv1); - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "\nTime In start of write :%ld ms\n",(tv1.tv_sec *1000 + tv1.tv_usec /1000)); - */ - uiPartOffset = (uiSectAlignAddr & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter); - - BcmDoChipSelect(Adapter, uiSectAlignAddr); - - if (0 != BeceemFlashBulkRead(Adapter, - (PUINT)pTempBuff, - uiOffsetFromSectStart, - Adapter->uiSectorSize)) { - Status = -1; - goto BeceemFlashBulkWrite_EXIT; - } - - /* do_gettimeofday(&tr); - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by Read :%ld ms\n", (tr.tv_sec *1000 + tr.tv_usec/1000) - (tv1.tv_sec *1000 + tv1.tv_usec/1000)); - */ - ulStatus = BcmFlashUnProtectBlock(Adapter, uiSectAlignAddr, Adapter->uiSectorSize); - - if (uiNumSectTobeRead > 1) { - memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr)); - pcBuffer += ((uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr))); - uiNumBytes -= (uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr)); - } else { - memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiNumBytes); - } - - if (IsFlash2x(Adapter)) - SaveHeaderIfPresent(Adapter, (PUCHAR)pTempBuff, uiOffsetFromSectStart); - - FlashSectorErase(Adapter, uiPartOffset, 1); - /* do_gettimeofday(&te); - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by Erase :%ld ms\n", (te.tv_sec *1000 + te.tv_usec/1000) - (tr.tv_sec *1000 + tr.tv_usec/1000)); - */ - for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += Adapter->ulFlashWriteSize) { - if (Adapter->device_removed) { - Status = -1; - goto BeceemFlashBulkWrite_EXIT; - } - - if (STATUS_SUCCESS != (*Adapter->fpFlashWrite)(Adapter, uiPartOffset + uiIndex, (&pTempBuff[uiIndex]))) { - Status = -1; - goto BeceemFlashBulkWrite_EXIT; - } - } - - /* do_gettimeofday(&tw); - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken in Write to Flash :%ld ms\n", (tw.tv_sec *1000 + tw.tv_usec/1000) - (te.tv_sec *1000 + te.tv_usec/1000)); - */ - - if (STATUS_FAILURE == bulk_read_complete_sector(Adapter, - ucReadBk, - pTempBuff, - uiOffsetFromSectStart, - uiPartOffset)) { - Status = STATUS_FAILURE; - goto BeceemFlashBulkWrite_EXIT; - } - - /* do_gettimeofday(&twv); - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken in Write to Flash verification :%ld ms\n", (twv.tv_sec *1000 + twv.tv_usec/1000) - (tw.tv_sec *1000 + tw.tv_usec/1000)); - */ - if (ulStatus) { - BcmRestoreBlockProtectStatus(Adapter, ulStatus); - ulStatus = 0; - } - - uiCurrSectOffsetAddr = 0; - uiSectAlignAddr = uiSectBoundary; - uiSectBoundary += Adapter->uiSectorSize; - uiOffsetFromSectStart += Adapter->uiSectorSize; - uiNumSectTobeRead--; - } - /* do_gettimeofday(&tv2); - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Time after Write :%ld ms\n",(tv2.tv_sec *1000 + 
tv2.tv_usec/1000)); - * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by in Write is :%ld ms\n", (tv2.tv_sec *1000 + tv2.tv_usec/1000) - (tv1.tv_sec *1000 + tv1.tv_usec/1000)); - * - * Cleanup. - */ -BeceemFlashBulkWrite_EXIT: - if (ulStatus) - BcmRestoreBlockProtectStatus(Adapter, ulStatus); - - kfree(pTempBuff); - - Adapter->SelectedChip = RESET_CHIP_SELECT; - return Status; -} - -/* - * Procedure: BeceemFlashBulkWriteStatus - * - * Description: Writes to Flash. Checks the SPI status after each write. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * pBuffer - Data to be written. - * uiOffset - Offset of the flash where data needs to be written to. - * uiNumBytes - Number of bytes to be written. - * bVerify - read verify flag. - * Returns: - * OSAL_STATUS_CODE - * - */ - -static int BeceemFlashBulkWriteStatus(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes, - bool bVerify) -{ - PCHAR pTempBuff = NULL; - PUCHAR pcBuffer = (PUCHAR)pBuffer; - unsigned int uiIndex = 0; - unsigned int uiOffsetFromSectStart = 0; - unsigned int uiSectAlignAddr = 0; - unsigned int uiCurrSectOffsetAddr = 0; - unsigned int uiSectBoundary = 0; - unsigned int uiNumSectTobeRead = 0; - UCHAR ucReadBk[16] = {0}; - ULONG ulStatus = 0; - unsigned int Status = STATUS_SUCCESS; - unsigned int uiTemp = 0; - unsigned int index = 0; - unsigned int uiPartOffset = 0; - - uiOffsetFromSectStart = uiOffset & ~(Adapter->uiSectorSize - 1); - - /* uiOffset += Adapter->ulFlashCalStart; - * Adding flash Base address - * uiOffset = uiOffset + GetFlashBaseAddr(Adapter); - */ - uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1); - uiCurrSectOffsetAddr = uiOffset & (Adapter->uiSectorSize - 1); - uiSectBoundary = uiSectAlignAddr + Adapter->uiSectorSize; - - pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL); - if (!pTempBuff) - goto BeceemFlashBulkWriteStatus_EXIT; - - /* - * check if the data to be written is overlapped across sectors - */ - if (uiOffset+uiNumBytes < uiSectBoundary) { - uiNumSectTobeRead = 1; - } else { - /* Number of sectors = Last sector start address/First sector start address */ - uiNumSectTobeRead = (uiCurrSectOffsetAddr + uiNumBytes) / Adapter->uiSectorSize; - if ((uiCurrSectOffsetAddr + uiNumBytes)%Adapter->uiSectorSize) - uiNumSectTobeRead++; - } - - if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == false)) { - index = 0; - uiTemp = uiNumSectTobeRead; - while (uiTemp) { - if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Sector Starting at offset <0X%x> is not writable", - (uiOffsetFromSectStart + index * Adapter->uiSectorSize)); - Status = SECTOR_IS_NOT_WRITABLE; - goto BeceemFlashBulkWriteStatus_EXIT; - } - uiTemp = uiTemp - 1; - index = index + 1; - } - } - - Adapter->SelectedChip = RESET_CHIP_SELECT; - while (uiNumSectTobeRead) { - uiPartOffset = (uiSectAlignAddr & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter); - - BcmDoChipSelect(Adapter, uiSectAlignAddr); - if (0 != BeceemFlashBulkRead(Adapter, - (PUINT)pTempBuff, - uiOffsetFromSectStart, - Adapter->uiSectorSize)) { - Status = -1; - goto BeceemFlashBulkWriteStatus_EXIT; - } - - ulStatus = BcmFlashUnProtectBlock(Adapter, uiOffsetFromSectStart, Adapter->uiSectorSize); - - if (uiNumSectTobeRead > 1) { - memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr)); - pcBuffer += ((uiSectBoundary - 
(uiSectAlignAddr + uiCurrSectOffsetAddr))); - uiNumBytes -= (uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr)); - } else { - memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiNumBytes); - } - - if (IsFlash2x(Adapter)) - SaveHeaderIfPresent(Adapter, (PUCHAR)pTempBuff, uiOffsetFromSectStart); - - FlashSectorErase(Adapter, uiPartOffset, 1); - - for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += Adapter->ulFlashWriteSize) { - if (Adapter->device_removed) { - Status = -1; - goto BeceemFlashBulkWriteStatus_EXIT; - } - - if (STATUS_SUCCESS != (*Adapter->fpFlashWriteWithStatusCheck)(Adapter, uiPartOffset+uiIndex, &pTempBuff[uiIndex])) { - Status = -1; - goto BeceemFlashBulkWriteStatus_EXIT; - } - } - - if (bVerify) { - for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += MAX_RW_SIZE) { - if (STATUS_SUCCESS == BeceemFlashBulkRead(Adapter, (PUINT)ucReadBk, uiOffsetFromSectStart + uiIndex, MAX_RW_SIZE)) { - if (memcmp(ucReadBk, &pTempBuff[uiIndex], MAX_RW_SIZE)) { - Status = STATUS_FAILURE; - goto BeceemFlashBulkWriteStatus_EXIT; - } - } - } - } - - if (ulStatus) { - BcmRestoreBlockProtectStatus(Adapter, ulStatus); - ulStatus = 0; - } - - uiCurrSectOffsetAddr = 0; - uiSectAlignAddr = uiSectBoundary; - uiSectBoundary += Adapter->uiSectorSize; - uiOffsetFromSectStart += Adapter->uiSectorSize; - uiNumSectTobeRead--; - } -/* - * Cleanup. - */ -BeceemFlashBulkWriteStatus_EXIT: - if (ulStatus) - BcmRestoreBlockProtectStatus(Adapter, ulStatus); - - kfree(pTempBuff); - Adapter->SelectedChip = RESET_CHIP_SELECT; - return Status; -} - -/* - * Procedure: PropagateCalParamsFromFlashToMemory - * - * Description: Dumps the calibration section of EEPROM to DDR. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * Returns: - * OSAL_STATUS_CODE - * - */ - -int PropagateCalParamsFromFlashToMemory(struct bcm_mini_adapter *Adapter) -{ - PCHAR pBuff, pPtr; - unsigned int uiEepromSize = 0; - unsigned int uiBytesToCopy = 0; - /* unsigned int uiIndex = 0; */ - unsigned int uiCalStartAddr = EEPROM_CALPARAM_START; - unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC; - unsigned int value; - int Status = 0; - - /* - * Write the signature first. This will ensure firmware does not access EEPROM. - */ - value = 0xbeadbead; - wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value)); - value = 0xbeadbead; - wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value)); - - if (0 != BeceemNVMRead(Adapter, &uiEepromSize, EEPROM_SIZE_OFFSET, 4)) - return -1; - - uiEepromSize = ntohl(uiEepromSize); - uiEepromSize >>= 16; - - /* - * subtract the auto init section size - */ - uiEepromSize -= EEPROM_CALPARAM_START; - - if (uiEepromSize > 1024 * 1024) - return -1; - - pBuff = kmalloc(uiEepromSize, GFP_KERNEL); - if (pBuff == NULL) - return -ENOMEM; - - if (0 != BeceemNVMRead(Adapter, (PUINT)pBuff, uiCalStartAddr, uiEepromSize)) { - kfree(pBuff); - return -1; - } - - pPtr = pBuff; - - uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize); - - while (uiBytesToCopy) { - Status = wrm(Adapter, uiMemoryLoc, (PCHAR)pPtr, uiBytesToCopy); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed with status :%d", Status); - break; - } - - pPtr += uiBytesToCopy; - uiEepromSize -= uiBytesToCopy; - uiMemoryLoc += uiBytesToCopy; - uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize); - } - - kfree(pBuff); - return Status; -} - -/* - * Procedure: BeceemEEPROMReadBackandVerify - * - * Description: Read back the data written and verifies. 
- * - * Arguments: - * Adapter - ptr to Adapter object instance - * pBuffer - Data to be written. - * uiOffset - Offset of the flash where data needs to be written to. - * uiNumBytes - Number of bytes to be written. - * Returns: - * OSAL_STATUS_CODE - * - */ - -static int BeceemEEPROMReadBackandVerify(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes) -{ - unsigned int uiRdbk = 0; - unsigned int uiIndex = 0; - unsigned int uiData = 0; - unsigned int auiData[4] = {0}; - - while (uiNumBytes) { - if (Adapter->device_removed) - return -1; - - if (uiNumBytes >= MAX_RW_SIZE) { - /* for the requests more than or equal to MAX_RW_SIZE bytes, use bulk read function to make the access faster. */ - BeceemEEPROMBulkRead(Adapter, &auiData[0], uiOffset, MAX_RW_SIZE); - - if (memcmp(&pBuffer[uiIndex], &auiData[0], MAX_RW_SIZE)) { - /* re-write */ - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, MAX_RW_SIZE, false); - mdelay(3); - BeceemEEPROMBulkRead(Adapter, &auiData[0], uiOffset, MAX_RW_SIZE); - - if (memcmp(&pBuffer[uiIndex], &auiData[0], MAX_RW_SIZE)) - return -1; - } - uiOffset += MAX_RW_SIZE; - uiNumBytes -= MAX_RW_SIZE; - uiIndex += 4; - } else if (uiNumBytes >= 4) { - BeceemEEPROMBulkRead(Adapter, &uiData, uiOffset, 4); - if (uiData != pBuffer[uiIndex]) { - /* re-write */ - BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, 4, false); - mdelay(3); - BeceemEEPROMBulkRead(Adapter, &uiData, uiOffset, 4); - if (uiData != pBuffer[uiIndex]) - return -1; - } - uiOffset += 4; - uiNumBytes -= 4; - uiIndex++; - } else { - /* Handle the reads less than 4 bytes... */ - uiData = 0; - memcpy(&uiData, ((PUCHAR)pBuffer) + (uiIndex * sizeof(unsigned int)), uiNumBytes); - BeceemEEPROMBulkRead(Adapter, &uiRdbk, uiOffset, 4); - - if (memcmp(&uiData, &uiRdbk, uiNumBytes)) - return -1; - - uiNumBytes = 0; - } - } - - return 0; -} - -static VOID BcmSwapWord(unsigned int *ptr1) -{ - unsigned int tempval = (unsigned int)*ptr1; - char *ptr2 = (char *)&tempval; - char *ptr = (char *)ptr1; - - ptr[0] = ptr2[3]; - ptr[1] = ptr2[2]; - ptr[2] = ptr2[1]; - ptr[3] = ptr2[0]; -} - -/* - * Procedure: BeceemEEPROMWritePage - * - * Description: Performs page write (16bytes) to the EEPROM - * - * Arguments: - * Adapter - ptr to Adapter object instance - * uiData - Data to be written. - * uiOffset - Offset of the EEPROM where data needs to be written to. - * Returns: - * OSAL_STATUS_CODE - * - */ - -static int BeceemEEPROMWritePage(struct bcm_mini_adapter *Adapter, unsigned int uiData[], unsigned int uiOffset) -{ - unsigned int uiRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY; - unsigned int uiStatus = 0; - UCHAR uiEpromStatus = 0; - unsigned int value = 0; - - /* Flush the Write/Read/Cmd queues. */ - value = (EEPROM_WRITE_QUEUE_FLUSH | EEPROM_CMD_QUEUE_FLUSH | EEPROM_READ_QUEUE_FLUSH); - wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value)); - value = 0; - wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value)); - - /* Clear the Empty/Avail/Full bits. After this it has been confirmed - * that the bit was cleared by reading back the register. See NOTE below. - * We also clear the Read queues as we do a EEPROM status register read - * later. 
- */ - value = (EEPROM_WRITE_QUEUE_EMPTY | EEPROM_WRITE_QUEUE_AVAIL | EEPROM_WRITE_QUEUE_FULL | EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL); - wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - - /* Enable write */ - value = EEPROM_WRITE_ENABLE; - wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value)); - - /* We can write back to back 8bits * 16 into the queue and as we have - * checked for the queue to be empty we can write in a burst. - */ - - value = uiData[0]; - BcmSwapWord(&value); - wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4); - - value = uiData[1]; - BcmSwapWord(&value); - wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4); - - value = uiData[2]; - BcmSwapWord(&value); - wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4); - - value = uiData[3]; - BcmSwapWord(&value); - wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4); - - /* NOTE : After this write, on readback of EEPROM_SPI_Q_STATUS1_REG - * shows that we see 7 for the EEPROM data write. Which means that - * queue got full, also space is available as well as the queue is empty. - * This may happen in sequence. - */ - value = EEPROM_16_BYTE_PAGE_WRITE | uiOffset; - wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value)); - - /* Ideally we should loop here without tries and eventually succeed. - * What we are checking if the previous write has completed, and this - * may take time. We should wait till the Empty bit is set. - */ - uiStatus = 0; - rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus)); - while ((uiStatus & EEPROM_WRITE_QUEUE_EMPTY) == 0) { - uiRetries--; - if (uiRetries == 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x0f003004 = %x, %d retries failed.\n", uiStatus, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY); - return STATUS_FAILURE; - } - - if (!(uiRetries%RETRIES_PER_DELAY)) - udelay(1000); - - uiStatus = 0; - rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus)); - if (Adapter->device_removed == TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem got removed hence exiting from loop...."); - return -ENODEV; - } - } - - if (uiRetries != 0) { - /* Clear the ones that are set - either, Empty/Full/Avail bits */ - value = (uiStatus & (EEPROM_WRITE_QUEUE_EMPTY | EEPROM_WRITE_QUEUE_AVAIL | EEPROM_WRITE_QUEUE_FULL)); - wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - } - - /* Here we should check if the EEPROM status register is correct before - * proceeding. Bit 0 in the EEPROM Status register should be 0 before - * we proceed further. A 1 at Bit 0 indicates that the EEPROM is busy - * with the previous write. Note also that issuing this read finally - * means the previous write to the EEPROM has completed. 
- */ - uiRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY; - uiEpromStatus = 0; - while (uiRetries != 0) { - uiEpromStatus = ReadEEPROMStatusRegister(Adapter); - if (Adapter->device_removed == TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem has got removed hence exiting from loop..."); - return -ENODEV; - } - if ((EEPROM_STATUS_REG_WRITE_BUSY & uiEpromStatus) == 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM status register = %x tries = %d\n", uiEpromStatus, (MAX_EEPROM_RETRIES * RETRIES_PER_DELAY - uiRetries)); - return STATUS_SUCCESS; - } - uiRetries--; - if (uiRetries == 0) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x0f003004 = %x, for EEPROM status read %d retries failed.\n", uiEpromStatus, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY); - return STATUS_FAILURE; - } - uiEpromStatus = 0; - if (!(uiRetries%RETRIES_PER_DELAY)) - udelay(1000); - } - - return STATUS_SUCCESS; -} /* BeceemEEPROMWritePage */ - -/* - * Procedure: BeceemEEPROMBulkWrite - * - * Description: Performs write to the EEPROM - * - * Arguments: - * Adapter - ptr to Adapter object instance - * pBuffer - Data to be written. - * uiOffset - Offset of the EEPROM where data needs to be written to. - * uiNumBytes - Number of bytes to be written. - * bVerify - read verify flag. - * Returns: - * OSAL_STATUS_CODE - * - */ - -int BeceemEEPROMBulkWrite(struct bcm_mini_adapter *Adapter, - PUCHAR pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes, - bool bVerify) -{ - unsigned int uiBytesToCopy = uiNumBytes; - /* unsigned int uiRdbk = 0; */ - unsigned int uiData[4] = {0}; - unsigned int uiIndex = 0; - unsigned int uiTempOffset = 0; - unsigned int uiExtraBytes = 0; - /* PUINT puiBuffer = (PUINT)pBuffer; - * int value; - */ - - if (uiOffset % MAX_RW_SIZE && uiBytesToCopy) { - uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE); - uiExtraBytes = uiOffset - uiTempOffset; - - BeceemEEPROMBulkRead(Adapter, &uiData[0], uiTempOffset, MAX_RW_SIZE); - - if (uiBytesToCopy >= (16 - uiExtraBytes)) { - memcpy((((PUCHAR)&uiData[0]) + uiExtraBytes), pBuffer, MAX_RW_SIZE - uiExtraBytes); - - if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiTempOffset)) - return STATUS_FAILURE; - - uiBytesToCopy -= (MAX_RW_SIZE - uiExtraBytes); - uiIndex += (MAX_RW_SIZE - uiExtraBytes); - uiOffset += (MAX_RW_SIZE - uiExtraBytes); - } else { - memcpy((((PUCHAR)&uiData[0]) + uiExtraBytes), pBuffer, uiBytesToCopy); - - if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiTempOffset)) - return STATUS_FAILURE; - - uiIndex += uiBytesToCopy; - uiOffset += uiBytesToCopy; - uiBytesToCopy = 0; - } - } - - while (uiBytesToCopy) { - if (Adapter->device_removed) - return -1; - - if (uiBytesToCopy >= MAX_RW_SIZE) { - if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, (PUINT) &pBuffer[uiIndex], uiOffset)) - return STATUS_FAILURE; - - uiIndex += MAX_RW_SIZE; - uiOffset += MAX_RW_SIZE; - uiBytesToCopy -= MAX_RW_SIZE; - } else { - /* - * To program non 16byte aligned data, read 16byte and then update. - */ - BeceemEEPROMBulkRead(Adapter, &uiData[0], uiOffset, 16); - memcpy(&uiData[0], pBuffer + uiIndex, uiBytesToCopy); - - if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiOffset)) - return STATUS_FAILURE; - - uiBytesToCopy = 0; - } - } - - return 0; -} - -/* - * Procedure: BeceemNVMRead - * - * Description: Reads n number of bytes from NVM. 
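- * Dispatches to FLASH or EEPROM access (BeceemFlashBulkRead / BeceemEEPROMBulkRead) depending on Adapter->eNVMType.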
- * - * Arguments: - * Adapter - ptr to Adapter object instance - * pBuffer - Buffer to store the data read from NVM - * uiOffset - Offset of NVM from where data should be read - * uiNumBytes - Number of bytes to be read from the NVM. - * - * Returns: - * OSAL_STATUS_SUCCESS - if NVM read is successful. - * - if failed. - */ - -int BeceemNVMRead(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes) -{ - int Status = 0; - - #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS) - unsigned int uiTemp = 0, value; - #endif - - if (Adapter->eNVMType == NVM_FLASH) { - if (Adapter->bFlashRawRead == false) { - if (IsSectionExistInVendorInfo(Adapter, Adapter->eActiveDSD)) - return vendorextnReadSection(Adapter, (PUCHAR)pBuffer, Adapter->eActiveDSD, uiOffset, uiNumBytes); - - uiOffset = uiOffset + Adapter->ulFlashCalStart; - } - - #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS) - Status = bcmflash_raw_read((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes); - #else - rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - value = 0; - wrmalt(Adapter, 0x0f000C80, &value, sizeof(value)); - Status = BeceemFlashBulkRead(Adapter, - pBuffer, - uiOffset, - uiNumBytes); - wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - #endif - } else if (Adapter->eNVMType == NVM_EEPROM) { - Status = BeceemEEPROMBulkRead(Adapter, - pBuffer, - uiOffset, - uiNumBytes); - } else { - Status = -1; - } - - return Status; -} - -/* - * Procedure: BeceemNVMWrite - * - * Description: Writes n number of bytes to NVM. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * pBuffer - Buffer contains the data to be written. - * uiOffset - Offset of NVM where data to be written to. - * uiNumBytes - Number of bytes to be written.. - * - * Returns: - * OSAL_STATUS_SUCCESS - if NVM write is successful. - * - if failed. 
- */ - -int BeceemNVMWrite(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - unsigned int uiOffset, - unsigned int uiNumBytes, - bool bVerify) -{ - int Status = 0; - unsigned int uiTemp = 0; - unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC; - unsigned int uiIndex = 0; - - #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS) - unsigned int value; - #endif - - unsigned int uiFlashOffset = 0; - - if (Adapter->eNVMType == NVM_FLASH) { - if (IsSectionExistInVendorInfo(Adapter, Adapter->eActiveDSD)) - Status = vendorextnWriteSection(Adapter, (PUCHAR)pBuffer, Adapter->eActiveDSD, uiOffset, uiNumBytes, bVerify); - else { - uiFlashOffset = uiOffset + Adapter->ulFlashCalStart; - - #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS) - Status = bcmflash_raw_write((uiFlashOffset / FLASH_PART_SIZE), (uiFlashOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes); - #else - rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - value = 0; - wrmalt(Adapter, 0x0f000C80, &value, sizeof(value)); - - if (Adapter->bStatusWrite == TRUE) - Status = BeceemFlashBulkWriteStatus(Adapter, - pBuffer, - uiFlashOffset, - uiNumBytes , - bVerify); - else - - Status = BeceemFlashBulkWrite(Adapter, - pBuffer, - uiFlashOffset, - uiNumBytes, - bVerify); - #endif - } - - if (uiOffset >= EEPROM_CALPARAM_START) { - uiMemoryLoc += (uiOffset - EEPROM_CALPARAM_START); - while (uiNumBytes) { - if (uiNumBytes > BUFFER_4K) { - wrm(Adapter, (uiMemoryLoc+uiIndex), (PCHAR)(pBuffer + (uiIndex / 4)), BUFFER_4K); - uiNumBytes -= BUFFER_4K; - uiIndex += BUFFER_4K; - } else { - wrm(Adapter, uiMemoryLoc+uiIndex, (PCHAR)(pBuffer + (uiIndex / 4)), uiNumBytes); - uiNumBytes = 0; - break; - } - } - } else { - if ((uiOffset + uiNumBytes) > EEPROM_CALPARAM_START) { - ULONG ulBytesTobeSkipped = 0; - PUCHAR pcBuffer = (PUCHAR)pBuffer; /* char pointer to take care of odd byte cases. */ - - uiNumBytes -= (EEPROM_CALPARAM_START - uiOffset); - ulBytesTobeSkipped += (EEPROM_CALPARAM_START - uiOffset); - uiOffset += (EEPROM_CALPARAM_START - uiOffset); - while (uiNumBytes) { - if (uiNumBytes > BUFFER_4K) { - wrm(Adapter, uiMemoryLoc + uiIndex, (PCHAR)&pcBuffer[ulBytesTobeSkipped + uiIndex], BUFFER_4K); - uiNumBytes -= BUFFER_4K; - uiIndex += BUFFER_4K; - } else { - wrm(Adapter, uiMemoryLoc + uiIndex, (PCHAR)&pcBuffer[ulBytesTobeSkipped + uiIndex], uiNumBytes); - uiNumBytes = 0; - break; - } - } - } - } - /* restore the values. */ - wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - } else if (Adapter->eNVMType == NVM_EEPROM) { - Status = BeceemEEPROMBulkWrite(Adapter, - (PUCHAR)pBuffer, - uiOffset, - uiNumBytes, - bVerify); - if (bVerify) - Status = BeceemEEPROMReadBackandVerify(Adapter, (PUINT)pBuffer, uiOffset, uiNumBytes); - } else { - Status = -1; - } - return Status; -} - -/* - * Procedure: BcmUpdateSectorSize - * - * Description: Updates the sector size to FLASH. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * uiSectorSize - sector size - * - * Returns: - * OSAL_STATUS_SUCCESS - if NVM write is successful. - * - if failed. 
- */ - -int BcmUpdateSectorSize(struct bcm_mini_adapter *Adapter, unsigned int uiSectorSize) -{ - int Status = -1; - struct bcm_flash_cs_info sFlashCsInfo = {0}; - unsigned int uiTemp = 0; - unsigned int uiSectorSig = 0; - unsigned int uiCurrentSectorSize = 0; - unsigned int value; - - rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - value = 0; - wrmalt(Adapter, 0x0f000C80, &value, sizeof(value)); - - /* - * Before updating the sector size in the reserved area, check if already present. - */ - BeceemFlashBulkRead(Adapter, (PUINT)&sFlashCsInfo, Adapter->ulFlashControlSectionStart, sizeof(sFlashCsInfo)); - uiSectorSig = ntohl(sFlashCsInfo.FlashSectorSizeSig); - uiCurrentSectorSize = ntohl(sFlashCsInfo.FlashSectorSize); - - if (uiSectorSig == FLASH_SECTOR_SIZE_SIG) { - if ((uiCurrentSectorSize <= MAX_SECTOR_SIZE) && (uiCurrentSectorSize >= MIN_SECTOR_SIZE)) { - if (uiSectorSize == uiCurrentSectorSize) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Provided sector size is same as programmed in Flash"); - Status = STATUS_SUCCESS; - goto Restore; - } - } - } - - if ((uiSectorSize <= MAX_SECTOR_SIZE) && (uiSectorSize >= MIN_SECTOR_SIZE)) { - sFlashCsInfo.FlashSectorSize = htonl(uiSectorSize); - sFlashCsInfo.FlashSectorSizeSig = htonl(FLASH_SECTOR_SIZE_SIG); - - Status = BeceemFlashBulkWrite(Adapter, - (PUINT)&sFlashCsInfo, - Adapter->ulFlashControlSectionStart, - sizeof(sFlashCsInfo), - TRUE); - } - -Restore: - /* restore the values. */ - wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - - return Status; -} - -/* - * Procedure: BcmGetFlashSectorSize - * - * Description: Finds the sector size of the FLASH. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * unsigned int - sector size. - * - */ - -static unsigned int BcmGetFlashSectorSize(struct bcm_mini_adapter *Adapter, unsigned int FlashSectorSizeSig, unsigned int FlashSectorSize) -{ - unsigned int uiSectorSize = 0; - unsigned int uiSectorSig = 0; - - if (Adapter->bSectorSizeOverride && - (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE && - Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE)) { - Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG; - } else { - uiSectorSig = FlashSectorSizeSig; - - if (uiSectorSig == FLASH_SECTOR_SIZE_SIG) { - uiSectorSize = FlashSectorSize; - /* - * If the sector size stored in the FLASH makes sense then use it. - */ - if (uiSectorSize <= MAX_SECTOR_SIZE && uiSectorSize >= MIN_SECTOR_SIZE) { - Adapter->uiSectorSize = uiSectorSize; - } else if (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE && - Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE) { - /* No valid size in FLASH, check if Config file has it. */ - Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG; - } else { - /* Init to Default, if none of the above works. */ - Adapter->uiSectorSize = DEFAULT_SECTOR_SIZE; - } - } else { - if (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE && - Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE) - Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG; - else - Adapter->uiSectorSize = DEFAULT_SECTOR_SIZE; - } - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Sector size :%x\n", Adapter->uiSectorSize); - - return Adapter->uiSectorSize; -} - -/* - * Procedure: BcmInitEEPROMQueues - * - * Description: Initialization of EEPROM queues. 
- * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * - */ - -static int BcmInitEEPROMQueues(struct bcm_mini_adapter *Adapter) -{ - unsigned int value = 0; - /* CHIP Bug : Clear the Avail bits on the Read queue. The default - * value on this register is supposed to be 0x00001102. - * But we get 0x00001122. - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Fixing reset value on 0x0f003004 register\n"); - value = EEPROM_READ_DATA_AVAIL; - wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value)); - - /* Flush the all the EEPROM queues. */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " Flushing the queues\n"); - value = EEPROM_ALL_QUEUE_FLUSH; - wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value)); - - value = 0; - wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value)); - - /* Read the EEPROM Status Register. Just to see, no real purpose. */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "EEPROM Status register value = %x\n", ReadEEPROMStatusRegister(Adapter)); - - return STATUS_SUCCESS; -} /* BcmInitEEPROMQueues() */ - -/* - * Procedure: BcmInitNVM - * - * Description: Initialization of NVM, EEPROM size,FLASH size, sector size etc. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * - */ - -int BcmInitNVM(struct bcm_mini_adapter *ps_adapter) -{ - BcmValidateNvmType(ps_adapter); - BcmInitEEPROMQueues(ps_adapter); - - if (ps_adapter->eNVMType == NVM_AUTODETECT) { - ps_adapter->eNVMType = BcmGetNvmType(ps_adapter); - if (ps_adapter->eNVMType == NVM_UNKNOWN) - BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_PRINTK, 0, 0, "NVM Type is unknown!!\n"); - } else if (ps_adapter->eNVMType == NVM_FLASH) { - BcmGetFlashCSInfo(ps_adapter); - } - - BcmGetNvmSize(ps_adapter); - - return STATUS_SUCCESS; -} - -/* BcmGetNvmSize : set the EEPROM or flash size in Adapter. - * - * Input Parameter: - * Adapter data structure - * Return Value : - * 0. means success; - */ - -static int BcmGetNvmSize(struct bcm_mini_adapter *Adapter) -{ - if (Adapter->eNVMType == NVM_EEPROM) - Adapter->uiNVMDSDSize = BcmGetEEPROMSize(Adapter); - else if (Adapter->eNVMType == NVM_FLASH) - Adapter->uiNVMDSDSize = BcmGetFlashSize(Adapter); - - return 0; -} - -/* - * Procedure: BcmValidateNvm - * - * Description: Validates the NVM Type option selected against the device - * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * - */ - -static VOID BcmValidateNvmType(struct bcm_mini_adapter *Adapter) -{ - /* - * if forcing the FLASH through CFG file, we should ensure device really has a FLASH. - * Accessing the FLASH address without the FLASH being present can cause hang/freeze etc. - * So if NVM_FLASH is selected for older chipsets, change it to AUTODETECT where EEPROM is 1st choice. - */ - - if (Adapter->eNVMType == NVM_FLASH && - Adapter->chip_id < 0xBECE3300) - Adapter->eNVMType = NVM_AUTODETECT; -} - -/* - * Procedure: BcmReadFlashRDID - * - * Description: Reads ID from Serial Flash - * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * Flash ID - */ - -static ULONG BcmReadFlashRDID(struct bcm_mini_adapter *Adapter) -{ - ULONG ulRDID = 0; - unsigned int value; - - /* - * Read ID Instruction. - */ - value = (FLASH_CMD_READ_ID << 24); - wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)); - - /* Delay */ - udelay(10); - - /* - * Read SPI READQ REG. The output will be WWXXYYZZ. - * The ID is 3Bytes long and is WWXXYY. ZZ needs to be Ignored. 
- */ - rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulRDID, sizeof(ulRDID)); - - return ulRDID >> 8; -} - -int BcmAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter) -{ - if (!psAdapter) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure point is NULL"); - return -EINVAL; - } - psAdapter->psFlashCSInfo = kzalloc(sizeof(struct bcm_flash_cs_info), GFP_KERNEL); - if (psAdapter->psFlashCSInfo == NULL) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate memory for Flash 1.x"); - return -ENOMEM; - } - - psAdapter->psFlash2xCSInfo = kzalloc(sizeof(struct bcm_flash2x_cs_info), GFP_KERNEL); - if (!psAdapter->psFlash2xCSInfo) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate memory for Flash 2.x"); - kfree(psAdapter->psFlashCSInfo); - return -ENOMEM; - } - - psAdapter->psFlash2xVendorInfo = kzalloc(sizeof(struct bcm_flash2x_vendor_info), GFP_KERNEL); - if (!psAdapter->psFlash2xVendorInfo) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate Vendor Info Memory for Flash 2.x"); - kfree(psAdapter->psFlashCSInfo); - kfree(psAdapter->psFlash2xCSInfo); - return -ENOMEM; - } - - return STATUS_SUCCESS; -} - -int BcmDeAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter) -{ - if (!psAdapter) { - BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure point is NULL"); - return -EINVAL; - } - kfree(psAdapter->psFlashCSInfo); - kfree(psAdapter->psFlash2xCSInfo); - kfree(psAdapter->psFlash2xVendorInfo); - return STATUS_SUCCESS; -} - -static int BcmDumpFlash2XCSStructure(struct bcm_flash2x_cs_info *psFlash2xCSInfo, struct bcm_mini_adapter *Adapter) -{ - unsigned int Index = 0; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "**********************FLASH2X CS Structure *******************"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is :%x", (psFlash2xCSInfo->MagicNumber)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Major Version :%d", MAJOR_VERSION(psFlash2xCSInfo->FlashLayoutVersion)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Minor Version :%d", MINOR_VERSION(psFlash2xCSInfo->FlashLayoutVersion)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " ISOImageMajorVersion:0x%x", (psFlash2xCSInfo->ISOImageVersion)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SCSIFirmwareMajorVersion :0x%x", (psFlash2xCSInfo->SCSIFirmwareVersion)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForPart1ISOImage :0x%x", (psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForScsiFirmware :0x%x", (psFlash2xCSInfo->OffsetFromZeroForScsiFirmware)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SizeOfScsiFirmware :0x%x", (psFlash2xCSInfo->SizeOfScsiFirmware)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForPart2ISOImage :0x%x", (psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSDStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSDStart)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSDEnd :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSDEnd)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSAStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSAStart)); - 
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSAEnd :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSAEnd)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForControlSectionStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForControlSectionStart)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForControlSectionData :0x%x", (psFlash2xCSInfo->OffsetFromZeroForControlSectionData)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "CDLessInactivityTimeout :0x%x", (psFlash2xCSInfo->CDLessInactivityTimeout)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "NewImageSignature :0x%x", (psFlash2xCSInfo->NewImageSignature)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashSectorSizeSig :0x%x", (psFlash2xCSInfo->FlashSectorSizeSig)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashSectorSize :0x%x", (psFlash2xCSInfo->FlashSectorSize)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashWriteSupportSize :0x%x", (psFlash2xCSInfo->FlashWriteSupportSize)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "TotalFlashSize :0x%X", (psFlash2xCSInfo->TotalFlashSize)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashBaseAddr :0x%x", (psFlash2xCSInfo->FlashBaseAddr)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashPartMaxSize :0x%x", (psFlash2xCSInfo->FlashPartMaxSize)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "IsCDLessDeviceBootSig :0x%x", (psFlash2xCSInfo->IsCDLessDeviceBootSig)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "MassStorageTimeout :0x%x", (psFlash2xCSInfo->MassStorageTimeout)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part1Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part1Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part1End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part1End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part2Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part2Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part2End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part2End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part3Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part3Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part3End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part3End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part1Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part1Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part1End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part1End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part2Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part2Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part2End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part2End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part3Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part3Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part3End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part3End)); - 
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromDSDStartForDSDHeader :0x%x", (psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD1Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD1Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD1End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD1End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD2Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD2Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD2End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD2End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA1Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA1Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA1End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA1End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA2Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA2Start)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA2End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA2End)); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Sector Access Bit Map is Defined as :"); - - for (Index = 0; Index < (FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)); Index++) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SectorAccessBitMap[%d] :0x%x", Index, - (psFlash2xCSInfo->SectorAccessBitMap[Index])); - - return STATUS_SUCCESS; -} - -static int ConvertEndianOf2XCSStructure(struct bcm_flash2x_cs_info *psFlash2xCSInfo) -{ - unsigned int Index = 0; - - psFlash2xCSInfo->MagicNumber = ntohl(psFlash2xCSInfo->MagicNumber); - psFlash2xCSInfo->FlashLayoutVersion = ntohl(psFlash2xCSInfo->FlashLayoutVersion); - /* psFlash2xCSInfo->FlashLayoutMinorVersion = ntohs(psFlash2xCSInfo->FlashLayoutMinorVersion); */ - psFlash2xCSInfo->ISOImageVersion = ntohl(psFlash2xCSInfo->ISOImageVersion); - psFlash2xCSInfo->SCSIFirmwareVersion = ntohl(psFlash2xCSInfo->SCSIFirmwareVersion); - psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage = ntohl(psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage); - psFlash2xCSInfo->OffsetFromZeroForScsiFirmware = ntohl(psFlash2xCSInfo->OffsetFromZeroForScsiFirmware); - psFlash2xCSInfo->SizeOfScsiFirmware = ntohl(psFlash2xCSInfo->SizeOfScsiFirmware); - psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage = ntohl(psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage); - psFlash2xCSInfo->OffsetFromZeroForDSDStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSDStart); - psFlash2xCSInfo->OffsetFromZeroForDSDEnd = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSDEnd); - psFlash2xCSInfo->OffsetFromZeroForVSAStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSAStart); - psFlash2xCSInfo->OffsetFromZeroForVSAEnd = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSAEnd); - psFlash2xCSInfo->OffsetFromZeroForControlSectionStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForControlSectionStart); - psFlash2xCSInfo->OffsetFromZeroForControlSectionData = ntohl(psFlash2xCSInfo->OffsetFromZeroForControlSectionData); - psFlash2xCSInfo->CDLessInactivityTimeout = ntohl(psFlash2xCSInfo->CDLessInactivityTimeout); - psFlash2xCSInfo->NewImageSignature = ntohl(psFlash2xCSInfo->NewImageSignature); - psFlash2xCSInfo->FlashSectorSizeSig = ntohl(psFlash2xCSInfo->FlashSectorSizeSig); - 
psFlash2xCSInfo->FlashSectorSize = ntohl(psFlash2xCSInfo->FlashSectorSize); - psFlash2xCSInfo->FlashWriteSupportSize = ntohl(psFlash2xCSInfo->FlashWriteSupportSize); - psFlash2xCSInfo->TotalFlashSize = ntohl(psFlash2xCSInfo->TotalFlashSize); - psFlash2xCSInfo->FlashBaseAddr = ntohl(psFlash2xCSInfo->FlashBaseAddr); - psFlash2xCSInfo->FlashPartMaxSize = ntohl(psFlash2xCSInfo->FlashPartMaxSize); - psFlash2xCSInfo->IsCDLessDeviceBootSig = ntohl(psFlash2xCSInfo->IsCDLessDeviceBootSig); - psFlash2xCSInfo->MassStorageTimeout = ntohl(psFlash2xCSInfo->MassStorageTimeout); - psFlash2xCSInfo->OffsetISOImage1Part1Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part1Start); - psFlash2xCSInfo->OffsetISOImage1Part1End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part1End); - psFlash2xCSInfo->OffsetISOImage1Part2Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part2Start); - psFlash2xCSInfo->OffsetISOImage1Part2End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part2End); - psFlash2xCSInfo->OffsetISOImage1Part3Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part3Start); - psFlash2xCSInfo->OffsetISOImage1Part3End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part3End); - psFlash2xCSInfo->OffsetISOImage2Part1Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part1Start); - psFlash2xCSInfo->OffsetISOImage2Part1End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part1End); - psFlash2xCSInfo->OffsetISOImage2Part2Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part2Start); - psFlash2xCSInfo->OffsetISOImage2Part2End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part2End); - psFlash2xCSInfo->OffsetISOImage2Part3Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part3Start); - psFlash2xCSInfo->OffsetISOImage2Part3End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part3End); - psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader = ntohl(psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader); - psFlash2xCSInfo->OffsetFromZeroForDSD1Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD1Start); - psFlash2xCSInfo->OffsetFromZeroForDSD1End = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD1End); - psFlash2xCSInfo->OffsetFromZeroForDSD2Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD2Start); - psFlash2xCSInfo->OffsetFromZeroForDSD2End = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD2End); - psFlash2xCSInfo->OffsetFromZeroForVSA1Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA1Start); - psFlash2xCSInfo->OffsetFromZeroForVSA1End = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA1End); - psFlash2xCSInfo->OffsetFromZeroForVSA2Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA2Start); - psFlash2xCSInfo->OffsetFromZeroForVSA2End = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA2End); - - for (Index = 0; Index < (FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)); Index++) - psFlash2xCSInfo->SectorAccessBitMap[Index] = ntohl(psFlash2xCSInfo->SectorAccessBitMap[Index]); - - return STATUS_SUCCESS; -} - -static int ConvertEndianOfCSStructure(struct bcm_flash_cs_info *psFlashCSInfo) -{ - /* unsigned int Index = 0; */ - psFlashCSInfo->MagicNumber = ntohl(psFlashCSInfo->MagicNumber); - psFlashCSInfo->FlashLayoutVersion = ntohl(psFlashCSInfo->FlashLayoutVersion); - psFlashCSInfo->ISOImageVersion = ntohl(psFlashCSInfo->ISOImageVersion); - /* won't convert according to old assumption */ - psFlashCSInfo->SCSIFirmwareVersion = (psFlashCSInfo->SCSIFirmwareVersion); - psFlashCSInfo->OffsetFromZeroForPart1ISOImage = ntohl(psFlashCSInfo->OffsetFromZeroForPart1ISOImage); - psFlashCSInfo->OffsetFromZeroForScsiFirmware = ntohl(psFlashCSInfo->OffsetFromZeroForScsiFirmware); - psFlashCSInfo->SizeOfScsiFirmware = 
ntohl(psFlashCSInfo->SizeOfScsiFirmware); - psFlashCSInfo->OffsetFromZeroForPart2ISOImage = ntohl(psFlashCSInfo->OffsetFromZeroForPart2ISOImage); - psFlashCSInfo->OffsetFromZeroForCalibrationStart = ntohl(psFlashCSInfo->OffsetFromZeroForCalibrationStart); - psFlashCSInfo->OffsetFromZeroForCalibrationEnd = ntohl(psFlashCSInfo->OffsetFromZeroForCalibrationEnd); - psFlashCSInfo->OffsetFromZeroForVSAStart = ntohl(psFlashCSInfo->OffsetFromZeroForVSAStart); - psFlashCSInfo->OffsetFromZeroForVSAEnd = ntohl(psFlashCSInfo->OffsetFromZeroForVSAEnd); - psFlashCSInfo->OffsetFromZeroForControlSectionStart = ntohl(psFlashCSInfo->OffsetFromZeroForControlSectionStart); - psFlashCSInfo->OffsetFromZeroForControlSectionData = ntohl(psFlashCSInfo->OffsetFromZeroForControlSectionData); - psFlashCSInfo->CDLessInactivityTimeout = ntohl(psFlashCSInfo->CDLessInactivityTimeout); - psFlashCSInfo->NewImageSignature = ntohl(psFlashCSInfo->NewImageSignature); - psFlashCSInfo->FlashSectorSizeSig = ntohl(psFlashCSInfo->FlashSectorSizeSig); - psFlashCSInfo->FlashSectorSize = ntohl(psFlashCSInfo->FlashSectorSize); - psFlashCSInfo->FlashWriteSupportSize = ntohl(psFlashCSInfo->FlashWriteSupportSize); - psFlashCSInfo->TotalFlashSize = ntohl(psFlashCSInfo->TotalFlashSize); - psFlashCSInfo->FlashBaseAddr = ntohl(psFlashCSInfo->FlashBaseAddr); - psFlashCSInfo->FlashPartMaxSize = ntohl(psFlashCSInfo->FlashPartMaxSize); - psFlashCSInfo->IsCDLessDeviceBootSig = ntohl(psFlashCSInfo->IsCDLessDeviceBootSig); - psFlashCSInfo->MassStorageTimeout = ntohl(psFlashCSInfo->MassStorageTimeout); - - return STATUS_SUCCESS; -} - -static int IsSectionExistInVendorInfo(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section) -{ - return (Adapter->uiVendorExtnFlag && - (Adapter->psFlash2xVendorInfo->VendorSection[section].AccessFlags & FLASH2X_SECTION_PRESENT) && - (Adapter->psFlash2xVendorInfo->VendorSection[section].OffsetFromZeroForSectionStart != UNINIT_PTR_IN_CS)); -} - -static VOID UpdateVendorInfo(struct bcm_mini_adapter *Adapter) -{ - B_UINT32 i = 0; - unsigned int uiSizeSection = 0; - - Adapter->uiVendorExtnFlag = false; - - for (i = 0; i < TOTAL_SECTIONS; i++) - Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart = UNINIT_PTR_IN_CS; - - if (STATUS_SUCCESS != vendorextnGetSectionInfo(Adapter, Adapter->psFlash2xVendorInfo)) - return; - - i = 0; - while (i < TOTAL_SECTIONS) { - if (!(Adapter->psFlash2xVendorInfo->VendorSection[i].AccessFlags & FLASH2X_SECTION_PRESENT)) { - i++; - continue; - } - - Adapter->uiVendorExtnFlag = TRUE; - uiSizeSection = (Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionEnd - - Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart); - - switch (i) { - case DSD0: - if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) && - (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)) - Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd = VENDOR_PTR_IN_CS; - else - Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd = UNINIT_PTR_IN_CS; - break; - - case DSD1: - if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) && - (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)) - Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start = 
Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End = VENDOR_PTR_IN_CS; - else - Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End = UNINIT_PTR_IN_CS; - break; - - case DSD2: - if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) && - (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)) - Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End = VENDOR_PTR_IN_CS; - else - Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End = UNINIT_PTR_IN_CS; - break; - case VSA0: - if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart) - Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd = VENDOR_PTR_IN_CS; - else - Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd = UNINIT_PTR_IN_CS; - break; - - case VSA1: - if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart) - Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End = VENDOR_PTR_IN_CS; - else - Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End = UNINIT_PTR_IN_CS; - break; - case VSA2: - if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart) - Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End = VENDOR_PTR_IN_CS; - else - Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End = UNINIT_PTR_IN_CS; - break; - - default: - break; - } - i++; - } -} - -/* - * Procedure: BcmGetFlashCSInfo - * - * Description: Reads control structure and gets Cal section addresses. 
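[Editor's aside] UpdateVendorInfo() above redirects DSD/VSA offsets to VENDOR_PTR_IN_CS when the vendor extension supplies a section; the offset-lookup helpers later in this file then defer to the vendor table via IsSectionExistInVendorInfo(). A minimal sketch of that lookup pattern (the identifiers are the driver's own, the surrounding snippet is illustrative):

	unsigned int start;

	if (IsSectionExistInVendorInfo(Adapter, DSD1))
		start = Adapter->psFlash2xVendorInfo->VendorSection[DSD1].OffsetFromZeroForSectionStart;
	else
		start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start;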
- * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * - */ - -static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter) -{ - /* struct bcm_flash_cs_info sFlashCsInfo = {0}; */ - - #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS) - unsigned int value; - #endif - - unsigned int uiFlashLayoutMajorVersion; - - Adapter->uiFlashLayoutMinorVersion = 0; - Adapter->uiFlashLayoutMajorVersion = 0; - Adapter->ulFlashControlSectionStart = FLASH_CS_INFO_START_ADDR; - - Adapter->uiFlashBaseAdd = 0; - Adapter->ulFlashCalStart = 0; - memset(Adapter->psFlashCSInfo, 0 , sizeof(struct bcm_flash_cs_info)); - memset(Adapter->psFlash2xCSInfo, 0 , sizeof(struct bcm_flash2x_cs_info)); - - if (!Adapter->bDDRInitDone) { - value = FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT; - wrmalt(Adapter, 0xAF00A080, &value, sizeof(value)); - } - - /* Reading first 8 Bytes to get the Flash Layout - * MagicNumber(4 bytes) +FlashLayoutMinorVersion(2 Bytes) +FlashLayoutMajorVersion(2 Bytes) - */ - BeceemFlashBulkRead(Adapter, (PUINT)Adapter->psFlashCSInfo, Adapter->ulFlashControlSectionStart, 8); - - Adapter->psFlashCSInfo->FlashLayoutVersion = ntohl(Adapter->psFlashCSInfo->FlashLayoutVersion); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Layout Version :%X", (Adapter->psFlashCSInfo->FlashLayoutVersion)); - /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Layout Minor Version :%d\n", ntohs(sFlashCsInfo.FlashLayoutMinorVersion)); */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is :%x\n", ntohl(Adapter->psFlashCSInfo->MagicNumber)); - - if (FLASH_CONTROL_STRUCT_SIGNATURE == ntohl(Adapter->psFlashCSInfo->MagicNumber)) { - uiFlashLayoutMajorVersion = MAJOR_VERSION((Adapter->psFlashCSInfo->FlashLayoutVersion)); - Adapter->uiFlashLayoutMinorVersion = MINOR_VERSION((Adapter->psFlashCSInfo->FlashLayoutVersion)); - } else { - Adapter->uiFlashLayoutMinorVersion = 0; - uiFlashLayoutMajorVersion = 0; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FLASH LAYOUT MAJOR VERSION :%X", uiFlashLayoutMajorVersion); - - if (uiFlashLayoutMajorVersion < FLASH_2X_MAJOR_NUMBER) { - BeceemFlashBulkRead(Adapter, (PUINT)Adapter->psFlashCSInfo, Adapter->ulFlashControlSectionStart, sizeof(struct bcm_flash_cs_info)); - ConvertEndianOfCSStructure(Adapter->psFlashCSInfo); - Adapter->ulFlashCalStart = (Adapter->psFlashCSInfo->OffsetFromZeroForCalibrationStart); - - if (!((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1))) - Adapter->ulFlashControlSectionStart = Adapter->psFlashCSInfo->OffsetFromZeroForControlSectionStart; - - if ((FLASH_CONTROL_STRUCT_SIGNATURE == (Adapter->psFlashCSInfo->MagicNumber)) && - (SCSI_FIRMWARE_MINOR_VERSION <= MINOR_VERSION(Adapter->psFlashCSInfo->SCSIFirmwareVersion)) && - (FLASH_SECTOR_SIZE_SIG == (Adapter->psFlashCSInfo->FlashSectorSizeSig)) && - (BYTE_WRITE_SUPPORT == (Adapter->psFlashCSInfo->FlashWriteSupportSize))) { - Adapter->ulFlashWriteSize = (Adapter->psFlashCSInfo->FlashWriteSupportSize); - Adapter->fpFlashWrite = flashByteWrite; - Adapter->fpFlashWriteWithStatusCheck = flashByteWriteStatus; - } else { - Adapter->ulFlashWriteSize = MAX_RW_SIZE; - Adapter->fpFlashWrite = flashWrite; - Adapter->fpFlashWriteWithStatusCheck = flashWriteStatus; - } - - BcmGetFlashSectorSize(Adapter, (Adapter->psFlashCSInfo->FlashSectorSizeSig), - (Adapter->psFlashCSInfo->FlashSectorSize)); - Adapter->uiFlashBaseAdd = Adapter->psFlashCSInfo->FlashBaseAddr & 
0xFCFFFFFF; - } else { - if (BcmFlash2xBulkRead(Adapter, (PUINT)Adapter->psFlash2xCSInfo, NO_SECTION_VAL, - Adapter->ulFlashControlSectionStart, sizeof(struct bcm_flash2x_cs_info))) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to read CS structure\n"); - return STATUS_FAILURE; - } - - ConvertEndianOf2XCSStructure(Adapter->psFlash2xCSInfo); - BcmDumpFlash2XCSStructure(Adapter->psFlash2xCSInfo, Adapter); - if ((FLASH_CONTROL_STRUCT_SIGNATURE == Adapter->psFlash2xCSInfo->MagicNumber) && - (SCSI_FIRMWARE_MINOR_VERSION <= MINOR_VERSION(Adapter->psFlash2xCSInfo->SCSIFirmwareVersion)) && - (FLASH_SECTOR_SIZE_SIG == Adapter->psFlash2xCSInfo->FlashSectorSizeSig) && - (BYTE_WRITE_SUPPORT == Adapter->psFlash2xCSInfo->FlashWriteSupportSize)) { - Adapter->ulFlashWriteSize = Adapter->psFlash2xCSInfo->FlashWriteSupportSize; - Adapter->fpFlashWrite = flashByteWrite; - Adapter->fpFlashWriteWithStatusCheck = flashByteWriteStatus; - } else { - Adapter->ulFlashWriteSize = MAX_RW_SIZE; - Adapter->fpFlashWrite = flashWrite; - Adapter->fpFlashWriteWithStatusCheck = flashWriteStatus; - } - - BcmGetFlashSectorSize(Adapter, Adapter->psFlash2xCSInfo->FlashSectorSizeSig, - Adapter->psFlash2xCSInfo->FlashSectorSize); - - UpdateVendorInfo(Adapter); - - BcmGetActiveDSD(Adapter); - BcmGetActiveISO(Adapter); - Adapter->uiFlashBaseAdd = Adapter->psFlash2xCSInfo->FlashBaseAddr & 0xFCFFFFFF; - Adapter->ulFlashControlSectionStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart; - } - /* - * Concerns: what if CS sector size does not match with this sector size ??? - * what is the indication of AccessBitMap in CS in flash 2.x ???? - */ - Adapter->ulFlashID = BcmReadFlashRDID(Adapter); - Adapter->uiFlashLayoutMajorVersion = uiFlashLayoutMajorVersion; - - return STATUS_SUCCESS; -} - -/* - * Procedure: BcmGetNvmType - * - * Description: Finds the type of NVM used. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * - * Returns: - * NVM_TYPE - * - */ - -static enum bcm_nvm_type BcmGetNvmType(struct bcm_mini_adapter *Adapter) -{ - unsigned int uiData = 0; - - BeceemEEPROMBulkRead(Adapter, &uiData, 0x0, 4); - if (uiData == BECM) - return NVM_EEPROM; - - /* - * Read control struct and get cal addresses before accessing the flash - */ - BcmGetFlashCSInfo(Adapter); - - BeceemFlashBulkRead(Adapter, &uiData, 0x0 + Adapter->ulFlashCalStart, 4); - if (uiData == BECM) - return NVM_FLASH; - - /* - * even if there is no valid signature on EEPROM/FLASH find out if they really exist. - * if exist select it. - */ - if (BcmGetEEPROMSize(Adapter)) - return NVM_EEPROM; - - /* TBD for Flash. */ - return NVM_UNKNOWN; -} - -/* - * BcmGetSectionValStartOffset - this will calculate the section's starting offset if section val is given - * @Adapter : Drivers Private Data structure - * @eFlashSectionVal : Flash secion value defined in enum bcm_flash2x_section_val - * - * Return value:- - * On success it return the start offset of the provided section val - * On Failure -returns STATUS_FAILURE - */ - -int BcmGetSectionValStartOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal) -{ - /* - * Considering all the section for which end offset can be calculated or directly given - * in CS Structure. if matching case does not exist, return STATUS_FAILURE indicating section - * endoffset can't be calculated or given in CS Structure. 
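[Editor's aside] BcmGetFlashCSInfo() branches on the major part of FlashLayoutVersion to decide between the legacy and 2.x control structures, and IsFlash2x() later repeats the same test. A sketch of the decision, assuming (as the code implies) that MAJOR_VERSION()/MINOR_VERSION() extract the two halves of the packed version word:

	/* Illustrative only: 2.x layouts carry a major version at or above
	 * FLASH_2X_MAJOR_NUMBER; anything lower uses struct bcm_flash_cs_info.
	 */
	bool is_flash2x =
		MAJOR_VERSION(Adapter->psFlashCSInfo->FlashLayoutVersion) >= FLASH_2X_MAJOR_NUMBER;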
- */ - - int SectStartOffset = 0; - - SectStartOffset = INVALID_OFFSET; - - if (IsSectionExistInVendorInfo(Adapter, eFlashSectionVal)) - return Adapter->psFlash2xVendorInfo->VendorSection[eFlashSectionVal].OffsetFromZeroForSectionStart; - - switch (eFlashSectionVal) { - case ISO_IMAGE1: - if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start != UNINIT_PTR_IN_CS) && - (IsNonCDLessDevice(Adapter) == false)) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start); - break; - case ISO_IMAGE2: - if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start != UNINIT_PTR_IN_CS) && - (IsNonCDLessDevice(Adapter) == false)) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start); - break; - case DSD0: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart); - break; - case DSD1: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start); - break; - case DSD2: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start); - break; - case VSA0: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart); - break; - case VSA1: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start); - break; - case VSA2: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start); - break; - case SCSI: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware); - break; - case CONTROL_SECTION: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart); - break; - case ISO_IMAGE1_PART2: - if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start); - break; - case ISO_IMAGE1_PART3: - if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start); - break; - case ISO_IMAGE2_PART2: - if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start); - break; - case ISO_IMAGE2_PART3: - if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start != UNINIT_PTR_IN_CS) - SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start); - break; - default: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Does not exist in Flash 2.x"); - SectStartOffset = INVALID_OFFSET; - } - - return SectStartOffset; -} - -/* - * BcmGetSectionValEndOffset - this will calculate the section's Ending offset if section val is given - * @Adapter : Drivers Private Data structure - * @eFlashSectionVal : Flash secion value defined in enum bcm_flash2x_section_val - * - * Return value:- - * On success it return the end offset of the provided section val - * On Failure -returns STATUS_FAILURE - */ - -static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, 
enum bcm_flash2x_section_val eFlash2xSectionVal) -{ - int SectEndOffset = 0; - - SectEndOffset = INVALID_OFFSET; - if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal)) - return Adapter->psFlash2xVendorInfo->VendorSection[eFlash2xSectionVal].OffsetFromZeroForSectionEnd; - - switch (eFlash2xSectionVal) { - case ISO_IMAGE1: - if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End != UNINIT_PTR_IN_CS) && - (IsNonCDLessDevice(Adapter) == false)) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End); - break; - case ISO_IMAGE2: - if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End != UNINIT_PTR_IN_CS) && - (IsNonCDLessDevice(Adapter) == false)) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End); - break; - case DSD0: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd); - break; - case DSD1: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End); - break; - case DSD2: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End); - break; - case VSA0: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd); - break; - case VSA1: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End); - break; - case VSA2: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End); - break; - case SCSI: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS) - SectEndOffset = ((Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware) + - (Adapter->psFlash2xCSInfo->SizeOfScsiFirmware)); - break; - case CONTROL_SECTION: - /* Not Clear So Putting failure. confirm and fix it. */ - SectEndOffset = STATUS_FAILURE; - break; - case ISO_IMAGE1_PART2: - if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End); - break; - case ISO_IMAGE1_PART3: - if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End); - break; - case ISO_IMAGE2_PART2: - if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End); - break; - case ISO_IMAGE2_PART3: - if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End != UNINIT_PTR_IN_CS) - SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End); - break; - default: - SectEndOffset = INVALID_OFFSET; - } - - return SectEndOffset; -} - -/* - * BcmFlash2xBulkRead:- Read API for Flash Map 2.x . - * @Adapter :Driver Private Data Structure - * @pBuffer : Buffer where data has to be put after reading - * @eFlashSectionVal :Flash Section Val defined in enum bcm_flash2x_section_val - * @uiOffsetWithinSectionVal :- Offset with in provided section - * @uiNumBytes : Number of Bytes for Read - * - * Return value:- - * return true on success and STATUS_FAILURE on fail. 
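[Editor's aside] Together, BcmGetSectionValStartOffset() and BcmGetSectionValEndOffset() let a caller derive a section's size, provided both lookups succeed (CONTROL_SECTION's end is additionally reported as STATUS_FAILURE). A minimal sketch; ex_section_size is hypothetical, not a driver function:

static int ex_section_size(struct bcm_mini_adapter *Adapter,
			   enum bcm_flash2x_section_val eSection)
{
	int start = BcmGetSectionValStartOffset(Adapter, eSection);
	int end = BcmGetSectionValEndOffset(Adapter, eSection);

	/* Both helpers report a missing section with INVALID_OFFSET. */
	if (start == INVALID_OFFSET || end == INVALID_OFFSET)
		return STATUS_FAILURE;

	return end - start;
}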
- */ - -int BcmFlash2xBulkRead(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - enum bcm_flash2x_section_val eFlash2xSectionVal, - unsigned int uiOffsetWithinSectionVal, - unsigned int uiNumBytes) -{ - int Status = STATUS_SUCCESS; - int SectionStartOffset = 0; - unsigned int uiAbsoluteOffset = 0; - unsigned int uiTemp = 0, value = 0; - - if (!Adapter) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure is NULL"); - return -EINVAL; - } - if (Adapter->device_removed) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device has been removed"); - return -ENODEV; - } - - /* NO_SECTION_VAL means absolute offset is given. */ - if (eFlash2xSectionVal == NO_SECTION_VAL) - SectionStartOffset = 0; - else - SectionStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal); - - if (SectionStartOffset == STATUS_FAILURE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash 2.x Map ", eFlash2xSectionVal); - return -EINVAL; - } - - if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal)) - return vendorextnReadSection(Adapter, (PUCHAR)pBuffer, eFlash2xSectionVal, uiOffsetWithinSectionVal, uiNumBytes); - - /* calculating the absolute offset from FLASH; */ - uiAbsoluteOffset = uiOffsetWithinSectionVal + SectionStartOffset; - rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - value = 0; - wrmalt(Adapter, 0x0f000C80, &value, sizeof(value)); - Status = BeceemFlashBulkRead(Adapter, pBuffer, uiAbsoluteOffset, uiNumBytes); - wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Read Failed with Status :%d", Status); - return Status; - } - - return Status; -} - -/* - * BcmFlash2xBulkWrite :-API for Writing on the Flash Map 2.x. - * @Adapter :Driver Private Data Structure - * @pBuffer : Buffer From where data has to taken for writing - * @eFlashSectionVal :Flash Section Val defined in enum bcm_flash2x_section_val - * @uiOffsetWithinSectionVal :- Offset with in provided section - * @uiNumBytes : Number of Bytes for Write - * - * Return value:- - * return true on success and STATUS_FAILURE on fail. - * - */ - -int BcmFlash2xBulkWrite(struct bcm_mini_adapter *Adapter, - PUINT pBuffer, - enum bcm_flash2x_section_val eFlash2xSectVal, - unsigned int uiOffset, - unsigned int uiNumBytes, - unsigned int bVerify) -{ - int Status = STATUS_SUCCESS; - unsigned int FlashSectValStartOffset = 0; - unsigned int uiTemp = 0, value = 0; - - if (!Adapter) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure is NULL"); - return -EINVAL; - } - - if (Adapter->device_removed) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device has been removed"); - return -ENODEV; - } - - /* NO_SECTION_VAL means absolute offset is given. 
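[Editor's aside] A usage sketch of BcmFlash2xBulkRead() above, pulling the first four bytes of DSD0 into a local variable (the caller and values are illustrative):

	unsigned int val = 0;
	int rc;

	rc = BcmFlash2xBulkRead(Adapter, &val, DSD0, 0, sizeof(val));
	if (rc)
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSD0 read failed: %d", rc);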
*/ - if (eFlash2xSectVal == NO_SECTION_VAL) - FlashSectValStartOffset = 0; - else - FlashSectValStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectVal); - - if (FlashSectValStartOffset == STATUS_FAILURE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash Map 2.x", eFlash2xSectVal); - return -EINVAL; - } - - if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectVal)) - return vendorextnWriteSection(Adapter, (PUCHAR)pBuffer, eFlash2xSectVal, uiOffset, uiNumBytes, bVerify); - - /* calculating the absolute offset from FLASH; */ - uiOffset = uiOffset + FlashSectValStartOffset; - - rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - value = 0; - wrmalt(Adapter, 0x0f000C80, &value, sizeof(value)); - - Status = BeceemFlashBulkWrite(Adapter, pBuffer, uiOffset, uiNumBytes, bVerify); - - wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write failed with Status :%d", Status); - return Status; - } - - return Status; -} - -/* - * BcmGetActiveDSD : Set the Active DSD in Adapter Structure which has to be dumped in DDR - * @Adapter :-Drivers private Data Structure - * - * Return Value:- - * Return STATUS_SUCESS if get success in setting the right DSD else negative error code - * - */ - -static int BcmGetActiveDSD(struct bcm_mini_adapter *Adapter) -{ - enum bcm_flash2x_section_val uiHighestPriDSD = 0; - - uiHighestPriDSD = getHighestPriDSD(Adapter); - Adapter->eActiveDSD = uiHighestPriDSD; - - if (DSD0 == uiHighestPriDSD) - Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart; - if (DSD1 == uiHighestPriDSD) - Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start; - if (DSD2 == uiHighestPriDSD) - Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start; - if (Adapter->eActiveDSD) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Active DSD :%d", Adapter->eActiveDSD); - if (Adapter->eActiveDSD == 0) { - /* if No DSD gets Active, Make Active the DSD with WR permission */ - if (IsSectionWritable(Adapter, DSD2)) { - Adapter->eActiveDSD = DSD2; - Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start; - } else if (IsSectionWritable(Adapter, DSD1)) { - Adapter->eActiveDSD = DSD1; - Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start; - } else if (IsSectionWritable(Adapter, DSD0)) { - Adapter->eActiveDSD = DSD0; - Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart; - } - } - - return STATUS_SUCCESS; -} - -/* - * BcmGetActiveISO :- Set the Active ISO in Adapter Data Structue - * @Adapter : Driver private Data Structure - * - * Return Value:- - * Sucsess:- STATUS_SUCESS - * Failure- : negative erro code - * - */ - -static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter) -{ - int HighestPriISO = 0; - - HighestPriISO = getHighestPriISO(Adapter); - - Adapter->eActiveISO = HighestPriISO; - if (Adapter->eActiveISO == ISO_IMAGE2) - Adapter->uiActiveISOOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start); - else if (Adapter->eActiveISO == ISO_IMAGE1) - Adapter->uiActiveISOOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start); - - if (Adapter->eActiveISO) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Active ISO :%x", Adapter->eActiveISO); - - return STATUS_SUCCESS; -} - -/* - * IsOffsetWritable :- it will tell the access permission of the sector having passed offset - * @Adapter : 
Drivers Private Data Structure - * @uiOffset : Offset provided in the Flash - * - * Return Value:- - * Success:-TRUE , offset is writable - * Failure:-false, offset is RO - * - */ - -static B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, unsigned int uiOffset) -{ - unsigned int uiSectorNum = 0; - unsigned int uiWordOfSectorPermission = 0; - unsigned int uiBitofSectorePermission = 0; - B_UINT32 permissionBits = 0; - - uiSectorNum = uiOffset/Adapter->uiSectorSize; - - /* calculating the word having this Sector Access permission from SectorAccessBitMap Array */ - uiWordOfSectorPermission = Adapter->psFlash2xCSInfo->SectorAccessBitMap[uiSectorNum / 16]; - - /* calculating the bit index inside the word for this sector */ - uiBitofSectorePermission = 2 * (15 - uiSectorNum % 16); - - /* Setting Access permission */ - permissionBits = uiWordOfSectorPermission & (0x3 << uiBitofSectorePermission); - permissionBits = (permissionBits >> uiBitofSectorePermission) & 0x3; - if (permissionBits == SECTOR_READWRITE_PERMISSION) - return TRUE; - else - return false; -} - -static int BcmDumpFlash2xSectionBitMap(struct bcm_flash2x_bitmap *psFlash2xBitMap) -{ - struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "***************Flash 2.x Section Bitmap***************"); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO_IMAGE1 :0X%x", psFlash2xBitMap->ISO_IMAGE1); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO_IMAGE2 :0X%x", psFlash2xBitMap->ISO_IMAGE2); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD0 :0X%x", psFlash2xBitMap->DSD0); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD1 :0X%x", psFlash2xBitMap->DSD1); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD2 :0X%x", psFlash2xBitMap->DSD2); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA0 :0X%x", psFlash2xBitMap->VSA0); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA1 :0X%x", psFlash2xBitMap->VSA1); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA2 :0X%x", psFlash2xBitMap->VSA2); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SCSI :0X%x", psFlash2xBitMap->SCSI); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "CONTROL_SECTION :0X%x", psFlash2xBitMap->CONTROL_SECTION); - - return STATUS_SUCCESS; -} - -/* - * BcmGetFlash2xSectionalBitMap :- It will provide the bit map of all the section present in Flash - * 8bit has been assigned to every section. - * bit[0] :Section present or not - * bit[1] :section is valid or not - * bit[2] : Secton is read only or has write permission too. - * bit[3] : Active Section - - * bit[7...4] = Reserved . - * - * @Adapter:-Driver private Data Structure - * - * Return value:- - * Success:- STATUS_SUCESS - * Failure:- negative error code - */ - -int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_bitmap *psFlash2xBitMap) -{ - struct bcm_flash2x_cs_info *psFlash2xCSInfo = Adapter->psFlash2xCSInfo; - enum bcm_flash2x_section_val uiHighestPriDSD = 0; - enum bcm_flash2x_section_val uiHighestPriISO = 0; - bool SetActiveDSDDone = false; - bool SetActiveISODone = false; - - /* For 1.x map all the section except DSD0 will be shown as not present - * This part will be used by calibration tool to detect the number of DSD present in Flash. 
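[Editor's aside] IsOffsetWritable() above packs two permission bits per sector, sixteen sectors per 32-bit word, most significant sector first. A worked example with illustrative numbers (offset 0x25000 and a 64 KiB sector size, not driver code):

	unsigned int off = 0x25000;
	unsigned int sector = off / 0x10000;                  /* sector 2          */
	unsigned int word = Adapter->psFlash2xCSInfo->SectorAccessBitMap[sector / 16];
	unsigned int shift = 2 * (15 - sector % 16);          /* 2 * (15 - 2) = 26 */
	bool rw = ((word >> shift) & 0x3) == SECTOR_READWRITE_PERMISSION;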
- */ - if (IsFlash2x(Adapter) == false) { - psFlash2xBitMap->ISO_IMAGE2 = 0; - psFlash2xBitMap->ISO_IMAGE1 = 0; - psFlash2xBitMap->DSD0 = FLASH2X_SECTION_VALID | FLASH2X_SECTION_ACT | FLASH2X_SECTION_PRESENT; /* 0xF; 0000(Reseved)1(Active)0(RW)1(valid)1(present) */ - psFlash2xBitMap->DSD1 = 0; - psFlash2xBitMap->DSD2 = 0; - psFlash2xBitMap->VSA0 = 0; - psFlash2xBitMap->VSA1 = 0; - psFlash2xBitMap->VSA2 = 0; - psFlash2xBitMap->CONTROL_SECTION = 0; - psFlash2xBitMap->SCSI = 0; - psFlash2xBitMap->Reserved0 = 0; - psFlash2xBitMap->Reserved1 = 0; - psFlash2xBitMap->Reserved2 = 0; - - return STATUS_SUCCESS; - } - - uiHighestPriDSD = getHighestPriDSD(Adapter); - uiHighestPriISO = getHighestPriISO(Adapter); - - /* - * IS0 IMAGE 2 - */ - if ((psFlash2xCSInfo->OffsetISOImage2Part1Start) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->ISO_IMAGE2 = psFlash2xBitMap->ISO_IMAGE2 | FLASH2X_SECTION_PRESENT; - - if (ReadISOSignature(Adapter, ISO_IMAGE2) == ISO_IMAGE_MAGIC_NUMBER) - psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_VALID; - - /* Calculation for extrating the Access permission */ - if (IsSectionWritable(Adapter, ISO_IMAGE2) == false) - psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_RO; - - if (SetActiveISODone == false && uiHighestPriISO == ISO_IMAGE2) { - psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_ACT; - SetActiveISODone = TRUE; - } - } - - /* - * IS0 IMAGE 1 - */ - if ((psFlash2xCSInfo->OffsetISOImage1Part1Start) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->ISO_IMAGE1 = psFlash2xBitMap->ISO_IMAGE1 | FLASH2X_SECTION_PRESENT; - - if (ReadISOSignature(Adapter, ISO_IMAGE1) == ISO_IMAGE_MAGIC_NUMBER) - psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_VALID; - - /* Calculation for extrating the Access permission */ - if (IsSectionWritable(Adapter, ISO_IMAGE1) == false) - psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_RO; - - if (SetActiveISODone == false && uiHighestPriISO == ISO_IMAGE1) { - psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_ACT; - SetActiveISODone = TRUE; - } - } - - /* - * DSD2 - */ - if ((psFlash2xCSInfo->OffsetFromZeroForDSD2Start) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->DSD2 = psFlash2xBitMap->DSD2 | FLASH2X_SECTION_PRESENT; - - if (ReadDSDSignature(Adapter, DSD2) == DSD_IMAGE_MAGIC_NUMBER) - psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_VALID; - - /* Calculation for extrating the Access permission */ - if (IsSectionWritable(Adapter, DSD2) == false) { - psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_RO; - } else { - /* Means section is writable */ - if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD2)) { - psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_ACT; - SetActiveDSDDone = TRUE; - } - } - } - - /* - * DSD 1 - */ - if ((psFlash2xCSInfo->OffsetFromZeroForDSD1Start) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. 
*/ - psFlash2xBitMap->DSD1 = psFlash2xBitMap->DSD1 | FLASH2X_SECTION_PRESENT; - - if (ReadDSDSignature(Adapter, DSD1) == DSD_IMAGE_MAGIC_NUMBER) - psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_VALID; - - /* Calculation for extrating the Access permission */ - if (IsSectionWritable(Adapter, DSD1) == false) { - psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_RO; - } else { - /* Means section is writable */ - if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD1)) { - psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_ACT; - SetActiveDSDDone = TRUE; - } - } - } - - /* - * For DSD 0 - */ - if ((psFlash2xCSInfo->OffsetFromZeroForDSDStart) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->DSD0 = psFlash2xBitMap->DSD0 | FLASH2X_SECTION_PRESENT; - - if (ReadDSDSignature(Adapter, DSD0) == DSD_IMAGE_MAGIC_NUMBER) - psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_VALID; - - /* Setting Access permission */ - if (IsSectionWritable(Adapter, DSD0) == false) { - psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_RO; - } else { - /* Means section is writable */ - if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD0)) { - psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_ACT; - SetActiveDSDDone = TRUE; - } - } - } - - /* - * VSA 0 - */ - if ((psFlash2xCSInfo->OffsetFromZeroForVSAStart) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->VSA0 = psFlash2xBitMap->VSA0 | FLASH2X_SECTION_PRESENT; - - /* Setting the Access Bit. Map is not defined hece setting it always valid */ - psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_VALID; - - /* Calculation for extrating the Access permission */ - if (IsSectionWritable(Adapter, VSA0) == false) - psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_RO; - - /* By Default section is Active */ - psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_ACT; - } - - /* - * VSA 1 - */ - if ((psFlash2xCSInfo->OffsetFromZeroForVSA1Start) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->VSA1 = psFlash2xBitMap->VSA1 | FLASH2X_SECTION_PRESENT; - - /* Setting the Access Bit. Map is not defined hece setting it always valid */ - psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_VALID; - - /* Checking For Access permission */ - if (IsSectionWritable(Adapter, VSA1) == false) - psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_RO; - - /* By Default section is Active */ - psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_ACT; - } - - /* - * VSA 2 - */ - if ((psFlash2xCSInfo->OffsetFromZeroForVSA2Start) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->VSA2 = psFlash2xBitMap->VSA2 | FLASH2X_SECTION_PRESENT; - - /* Setting the Access Bit. Map is not defined hece setting it always valid */ - psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_VALID; - - /* Checking For Access permission */ - if (IsSectionWritable(Adapter, VSA2) == false) - psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_RO; - - /* By Default section is Active */ - psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_ACT; - } - - /* - * SCSI Section - */ - if ((psFlash2xCSInfo->OffsetFromZeroForScsiFirmware) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->SCSI = psFlash2xBitMap->SCSI | FLASH2X_SECTION_PRESENT; - - /* Setting the Access Bit. 
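[Editor's aside] Each per-section byte that BcmGetFlash2xSectionalBitMap() fills in can be decoded by a caller with the same flag masks. A minimal sketch (the DSD1 choice is arbitrary):

	unsigned char b = psFlash2xBitMap->DSD1;
	bool present  = b & FLASH2X_SECTION_PRESENT;
	bool valid    = b & FLASH2X_SECTION_VALID;
	bool readonly = b & FLASH2X_SECTION_RO;
	bool active   = b & FLASH2X_SECTION_ACT;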
Map is not defined hece setting it always valid */ - psFlash2xBitMap->SCSI |= FLASH2X_SECTION_VALID; - - /* Checking For Access permission */ - if (IsSectionWritable(Adapter, SCSI) == false) - psFlash2xBitMap->SCSI |= FLASH2X_SECTION_RO; - - /* By Default section is Active */ - psFlash2xBitMap->SCSI |= FLASH2X_SECTION_ACT; - } - - /* - * Control Section - */ - if ((psFlash2xCSInfo->OffsetFromZeroForControlSectionStart) != UNINIT_PTR_IN_CS) { - /* Setting the 0th Bit representing the Section is present or not. */ - psFlash2xBitMap->CONTROL_SECTION = psFlash2xBitMap->CONTROL_SECTION | (FLASH2X_SECTION_PRESENT); - - /* Setting the Access Bit. Map is not defined hece setting it always valid */ - psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_VALID; - - /* Checking For Access permission */ - if (IsSectionWritable(Adapter, CONTROL_SECTION) == false) - psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_RO; - - /* By Default section is Active */ - psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_ACT; - } - - /* - * For Reserved Sections - */ - psFlash2xBitMap->Reserved0 = 0; - psFlash2xBitMap->Reserved0 = 0; - psFlash2xBitMap->Reserved0 = 0; - BcmDumpFlash2xSectionBitMap(psFlash2xBitMap); - - return STATUS_SUCCESS; -} - -/* - * BcmSetActiveSection :- Set Active section is used to make priority field highest over other - * section of same type. - * - * @Adapater :- Bcm Driver Private Data Structure - * @eFlash2xSectionVal :- Flash section val whose priority has to be made highest. - * - * Return Value:- Make the priorit highest else return erorr code - * - */ - -int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectVal) -{ - unsigned int SectImagePriority = 0; - int Status = STATUS_SUCCESS; - - /* struct bcm_dsd_header sDSD = {0}; - * struct bcm_iso_header sISO = {0}; - */ - int HighestPriDSD = 0; - int HighestPriISO = 0; - - Status = IsSectionWritable(Adapter, eFlash2xSectVal); - if (Status != TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section <%d> is not writable", eFlash2xSectVal); - return STATUS_FAILURE; - } - - Adapter->bHeaderChangeAllowed = TRUE; - switch (eFlash2xSectVal) { - case ISO_IMAGE1: - case ISO_IMAGE2: - if (ReadISOSignature(Adapter, eFlash2xSectVal) == ISO_IMAGE_MAGIC_NUMBER) { - HighestPriISO = getHighestPriISO(Adapter); - - if (HighestPriISO == eFlash2xSectVal) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given ISO<%x> already has highest priority", eFlash2xSectVal); - Status = STATUS_SUCCESS; - break; - } - - SectImagePriority = ReadISOPriority(Adapter, HighestPriISO) + 1; - - if ((SectImagePriority == 0) && IsSectionWritable(Adapter, HighestPriISO)) { - /* This is a SPECIAL Case which will only happen if the current highest priority ISO has priority value = 0x7FFFFFFF. 
- * We will write 1 to the current Highest priority ISO And then shall increase the priority of the requested ISO - * by user - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SectImagePriority wraparound happened, eFlash2xSectVal: 0x%x\n", eFlash2xSectVal); - SectImagePriority = htonl(0x1); - Status = BcmFlash2xBulkWrite(Adapter, - &SectImagePriority, - HighestPriISO, - 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority), - SIGNATURE_SIZE, - TRUE); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly"); - Status = STATUS_FAILURE; - break; - } - - HighestPriISO = getHighestPriISO(Adapter); - - if (HighestPriISO == eFlash2xSectVal) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given ISO<%x> already has highest priority", eFlash2xSectVal); - Status = STATUS_SUCCESS; - break; - } - - SectImagePriority = 2; - } - - SectImagePriority = htonl(SectImagePriority); - - Status = BcmFlash2xBulkWrite(Adapter, - &SectImagePriority, - eFlash2xSectVal, - 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority), - SIGNATURE_SIZE, - TRUE); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly"); - break; - } - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Signature is currupted. Hence can't increase the priority"); - Status = STATUS_FAILURE; - break; - } - break; - case DSD0: - case DSD1: - case DSD2: - if (ReadDSDSignature(Adapter, eFlash2xSectVal) == DSD_IMAGE_MAGIC_NUMBER) { - HighestPriDSD = getHighestPriDSD(Adapter); - if (HighestPriDSD == eFlash2xSectVal) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given DSD<%x> already has highest priority", eFlash2xSectVal); - Status = STATUS_SUCCESS; - break; - } - - SectImagePriority = ReadDSDPriority(Adapter, HighestPriDSD) + 1; - if (SectImagePriority == 0) { - /* This is a SPECIAL Case which will only happen if the current highest priority DSD has priority value = 0x7FFFFFFF. 
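[Editor's aside] The normal (non-wraparound) path of BcmSetActiveSection() for a DSD is: read the current highest priority, add one, byte-swap to the on-flash order, and write it into the requested section's header. A condensed sketch using the driver's own helpers:

	unsigned int pri = ReadDSDPriority(Adapter, getHighestPriDSD(Adapter)) + 1;
	int Status;

	pri = htonl(pri);
	Status = BcmFlash2xBulkWrite(Adapter, &pri, eFlash2xSectVal,
			Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader +
			FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
			SIGNATURE_SIZE, TRUE);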
- * We will write 1 to the current Highest priority DSD And then shall increase the priority of the requested DSD - * by user - */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, NVM_RW, DBG_LVL_ALL, "SectImagePriority wraparound happened, eFlash2xSectVal: 0x%x\n", eFlash2xSectVal); - SectImagePriority = htonl(0x1); - - Status = BcmFlash2xBulkWrite(Adapter, - &SectImagePriority, - HighestPriDSD, - Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority), - SIGNATURE_SIZE, - TRUE); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly"); - break; - } - - HighestPriDSD = getHighestPriDSD(Adapter); - - if (HighestPriDSD == eFlash2xSectVal) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Made the DSD: %x highest by reducing priority of other\n", eFlash2xSectVal); - Status = STATUS_SUCCESS; - break; - } - - SectImagePriority = htonl(0x2); - Status = BcmFlash2xBulkWrite(Adapter, - &SectImagePriority, - HighestPriDSD, - Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority), - SIGNATURE_SIZE, - TRUE); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly"); - break; - } - - HighestPriDSD = getHighestPriDSD(Adapter); - if (HighestPriDSD == eFlash2xSectVal) { - Status = STATUS_SUCCESS; - break; - } - - SectImagePriority = 3; - } - SectImagePriority = htonl(SectImagePriority); - Status = BcmFlash2xBulkWrite(Adapter, - &SectImagePriority, - eFlash2xSectVal, - Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority), - SIGNATURE_SIZE, - TRUE); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly"); - Status = STATUS_FAILURE; - break; - } - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Signature is currupted. 
Hence can't increase the priority"); - Status = STATUS_FAILURE; - break; - } - break; - case VSA0: - case VSA1: - case VSA2: - /* Has to be decided */ - break; - default: - Status = STATUS_FAILURE; - break; - } - - Adapter->bHeaderChangeAllowed = false; - return Status; -} - -/* - * BcmCopyISO - Used only for copying the ISO section - * @Adapater :- Bcm Driver Private Data Structure - * @sCopySectStrut :- Section copy structure - * - * Return value:- SUCCESS if copies successfully else negative error code - * - */ - -int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section sCopySectStrut) -{ - PCHAR Buff = NULL; - enum bcm_flash2x_section_val eISOReadPart = 0, eISOWritePart = 0; - unsigned int uiReadOffsetWithinPart = 0, uiWriteOffsetWithinPart = 0; - unsigned int uiTotalDataToCopy = 0; - bool IsThisHeaderSector = false; - unsigned int sigOffset = 0; - unsigned int ISOLength = 0; - unsigned int Status = STATUS_SUCCESS; - unsigned int SigBuff[MAX_RW_SIZE]; - unsigned int i = 0; - - if (ReadISOSignature(Adapter, sCopySectStrut.SrcSection) != ISO_IMAGE_MAGIC_NUMBER) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have valid signature"); - return STATUS_FAILURE; - } - - Status = BcmFlash2xBulkRead(Adapter, &ISOLength, - sCopySectStrut.SrcSection, - 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageSize), - 4); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO\n"); - return Status; - } - - ISOLength = htonl(ISOLength); - if (ISOLength % Adapter->uiSectorSize) - ISOLength = Adapter->uiSectorSize * (1 + ISOLength/Adapter->uiSectorSize); - - sigOffset = FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber); - - Buff = kzalloc(Adapter->uiSectorSize, GFP_KERNEL); - - if (!Buff) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for section size"); - return -ENOMEM; - } - - if (sCopySectStrut.SrcSection == ISO_IMAGE1 && sCopySectStrut.DstSection == ISO_IMAGE2) { - eISOReadPart = ISO_IMAGE1; - eISOWritePart = ISO_IMAGE2; - uiReadOffsetWithinPart = 0; - uiWriteOffsetWithinPart = 0; - - uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start) + - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start) + - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start); - - if (uiTotalDataToCopy < ISOLength) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have valid signature"); - Status = STATUS_FAILURE; - goto out; - } - - uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start) + - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start) + - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start); - - if (uiTotalDataToCopy < ISOLength) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Dest ISO Section does not have enough section size"); - Status = STATUS_FAILURE; - goto out; - } - - uiTotalDataToCopy = ISOLength; - - CorruptISOSig(Adapter, ISO_IMAGE2); - while (uiTotalDataToCopy) { - if (uiTotalDataToCopy == Adapter->uiSectorSize) { - /* Setting for write of first sector. 
First sector is assumed to be written in last */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Writing the signature sector"); - eISOReadPart = ISO_IMAGE1; - uiReadOffsetWithinPart = 0; - eISOWritePart = ISO_IMAGE2; - uiWriteOffsetWithinPart = 0; - IsThisHeaderSector = TRUE; - } else { - uiReadOffsetWithinPart = uiReadOffsetWithinPart + Adapter->uiSectorSize; - uiWriteOffsetWithinPart = uiWriteOffsetWithinPart + Adapter->uiSectorSize; - - if ((eISOReadPart == ISO_IMAGE1) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start))) { - eISOReadPart = ISO_IMAGE1_PART2; - uiReadOffsetWithinPart = 0; - } - - if ((eISOReadPart == ISO_IMAGE1_PART2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start))) { - eISOReadPart = ISO_IMAGE1_PART3; - uiReadOffsetWithinPart = 0; - } - - if ((eISOWritePart == ISO_IMAGE2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start))) { - eISOWritePart = ISO_IMAGE2_PART2; - uiWriteOffsetWithinPart = 0; - } - - if ((eISOWritePart == ISO_IMAGE2_PART2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start))) { - eISOWritePart = ISO_IMAGE2_PART3; - uiWriteOffsetWithinPart = 0; - } - } - - Status = BcmFlash2xBulkRead(Adapter, - (PUINT)Buff, - eISOReadPart, - uiReadOffsetWithinPart, - Adapter->uiSectorSize); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOReadPart, uiReadOffsetWithinPart); - break; - } - - if (IsThisHeaderSector == TRUE) { - /* If this is header sector write 0xFFFFFFFF at the sig time and in last write sig */ - memcpy(SigBuff, Buff + sigOffset, sizeof(SigBuff)); - - for (i = 0; i < MAX_RW_SIZE; i++) - *(Buff + sigOffset + i) = 0xFF; - } - Adapter->bHeaderChangeAllowed = TRUE; - Status = BcmFlash2xBulkWrite(Adapter, - (PUINT)Buff, - eISOWritePart, - uiWriteOffsetWithinPart, - Adapter->uiSectorSize, - TRUE); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOWritePart, uiWriteOffsetWithinPart); - break; - } - - Adapter->bHeaderChangeAllowed = false; - if (IsThisHeaderSector == TRUE) { - WriteToFlashWithoutSectorErase(Adapter, - SigBuff, - eISOWritePart, - sigOffset, - MAX_RW_SIZE); - IsThisHeaderSector = false; - } - /* subtracting the written Data */ - uiTotalDataToCopy = uiTotalDataToCopy - Adapter->uiSectorSize; - } - } - - if (sCopySectStrut.SrcSection == ISO_IMAGE2 && sCopySectStrut.DstSection == ISO_IMAGE1) { - eISOReadPart = ISO_IMAGE2; - eISOWritePart = ISO_IMAGE1; - uiReadOffsetWithinPart = 0; - uiWriteOffsetWithinPart = 0; - - uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start) + - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start) + - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start); - - if (uiTotalDataToCopy < ISOLength) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have valid signature"); - Status = STATUS_FAILURE; - goto out; - } - - uiTotalDataToCopy = 
(Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start) + - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start) + - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End) - - (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start); - - if (uiTotalDataToCopy < ISOLength) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Dest ISO Section does not have enough section size"); - Status = STATUS_FAILURE; - goto out; - } - - uiTotalDataToCopy = ISOLength; - - CorruptISOSig(Adapter, ISO_IMAGE1); - - while (uiTotalDataToCopy) { - if (uiTotalDataToCopy == Adapter->uiSectorSize) { - /* Setting for write of first sector. First sector is assumed to be written in last */ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Writing the signature sector"); - eISOReadPart = ISO_IMAGE2; - uiReadOffsetWithinPart = 0; - eISOWritePart = ISO_IMAGE1; - uiWriteOffsetWithinPart = 0; - IsThisHeaderSector = TRUE; - } else { - uiReadOffsetWithinPart = uiReadOffsetWithinPart + Adapter->uiSectorSize; - uiWriteOffsetWithinPart = uiWriteOffsetWithinPart + Adapter->uiSectorSize; - - if ((eISOReadPart == ISO_IMAGE2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start))) { - eISOReadPart = ISO_IMAGE2_PART2; - uiReadOffsetWithinPart = 0; - } - - if ((eISOReadPart == ISO_IMAGE2_PART2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start))) { - eISOReadPart = ISO_IMAGE2_PART3; - uiReadOffsetWithinPart = 0; - } - - if ((eISOWritePart == ISO_IMAGE1) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start))) { - eISOWritePart = ISO_IMAGE1_PART2; - uiWriteOffsetWithinPart = 0; - } - - if ((eISOWritePart == ISO_IMAGE1_PART2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start))) { - eISOWritePart = ISO_IMAGE1_PART3; - uiWriteOffsetWithinPart = 0; - } - } - - Status = BcmFlash2xBulkRead(Adapter, - (PUINT)Buff, - eISOReadPart, - uiReadOffsetWithinPart, - Adapter->uiSectorSize); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOReadPart, uiReadOffsetWithinPart); - break; - } - - if (IsThisHeaderSector == TRUE) { - /* If this is header sector write 0xFFFFFFFF at the sig time and in last write sig */ - memcpy(SigBuff, Buff + sigOffset, sizeof(SigBuff)); - - for (i = 0; i < MAX_RW_SIZE; i++) - *(Buff + sigOffset + i) = 0xFF; - } - Adapter->bHeaderChangeAllowed = TRUE; - Status = BcmFlash2xBulkWrite(Adapter, - (PUINT)Buff, - eISOWritePart, - uiWriteOffsetWithinPart, - Adapter->uiSectorSize, - TRUE); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOWritePart, uiWriteOffsetWithinPart); - break; - } - - Adapter->bHeaderChangeAllowed = false; - if (IsThisHeaderSector == TRUE) { - WriteToFlashWithoutSectorErase(Adapter, - SigBuff, - eISOWritePart, - sigOffset, - MAX_RW_SIZE); - - IsThisHeaderSector = false; - } - - /* subtracting the written Data */ - uiTotalDataToCopy = uiTotalDataToCopy - Adapter->uiSectorSize; - } - } -out: - kfree(Buff); - - return Status; -} - -/* - * BcmFlash2xCorruptSig : this API is used to 
corrupt the written sig in Bcm Header present in flash section. - * It will corrupt the sig, if Section is writable, by making first bytes as zero. - * @Adapater :- Bcm Driver Private Data Structure - * @eFlash2xSectionVal :- Flash section val which has header - * - * Return Value :- - * Success :- If Section is present and writable, corrupt the sig and return STATUS_SUCCESS - * Failure :-Return negative error code - */ - -int BcmFlash2xCorruptSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal) -{ - int Status = STATUS_SUCCESS; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Value :%x\n", eFlash2xSectionVal); - - if ((eFlash2xSectionVal == DSD0) || (eFlash2xSectionVal == DSD1) || (eFlash2xSectionVal == DSD2)) { - Status = CorruptDSDSig(Adapter, eFlash2xSectionVal); - } else if (eFlash2xSectionVal == ISO_IMAGE1 || eFlash2xSectionVal == ISO_IMAGE2) { - Status = CorruptISOSig(Adapter, eFlash2xSectionVal); - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given Section <%d>does not have Header", eFlash2xSectionVal); - return STATUS_SUCCESS; - } - return Status; -} - -/* - *BcmFlash2xWriteSig :-this API is used to Write the sig if requested Section has - * header and Write Permission. - * @Adapater :- Bcm Driver Private Data Structure - * @eFlashSectionVal :- Flash section val which has header - * - * Return Value :- - * Success :- If Section is present and writable write the sig and return STATUS_SUCCESS - * Failure :-Return negative error code - */ - -int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal) -{ - unsigned int uiSignature = 0; - unsigned int uiOffset = 0; - - /* struct bcm_dsd_header dsdHeader = {0}; */ - if (Adapter->bSigCorrupted == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is not corrupted by driver, hence not restoring\n"); - return STATUS_SUCCESS; - } - - if (Adapter->bAllDSDWriteAllow == false) { - if (IsSectionWritable(Adapter, eFlashSectionVal) == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Write signature"); - return SECTOR_IS_NOT_WRITABLE; - } - } - - if ((eFlashSectionVal == DSD0) || (eFlashSectionVal == DSD1) || (eFlashSectionVal == DSD2)) { - uiSignature = htonl(DSD_IMAGE_MAGIC_NUMBER); - uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader; - - uiOffset += FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber); - - if ((ReadDSDSignature(Adapter, eFlashSectionVal) & 0xFF000000) != CORRUPTED_PATTERN) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Corrupted Pattern is not there. Hence won't write sig"); - return STATUS_FAILURE; - } - } else if ((eFlashSectionVal == ISO_IMAGE1) || (eFlashSectionVal == ISO_IMAGE2)) { - uiSignature = htonl(ISO_IMAGE_MAGIC_NUMBER); - /* uiOffset = 0; */ - uiOffset = FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber); - if ((ReadISOSignature(Adapter, eFlashSectionVal) & 0xFF000000) != CORRUPTED_PATTERN) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Currupted Pattern is not there. 
Hence won't write sig"); - return STATUS_FAILURE; - } - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "GIVEN SECTION< %d > IS NOT VALID FOR SIG WRITE...", eFlashSectionVal); - return STATUS_FAILURE; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature"); - - Adapter->bHeaderChangeAllowed = TRUE; - Adapter->bSigCorrupted = false; - BcmFlash2xBulkWrite(Adapter, &uiSignature, eFlashSectionVal, uiOffset, SIGNATURE_SIZE, TRUE); - Adapter->bHeaderChangeAllowed = false; - - return STATUS_SUCCESS; -} - -/* - * validateFlash2xReadWrite :- This API is used to validate the user request for Read/Write. - * if requested Bytes goes beyond the Requested section, it reports error. - * @Adapater :- Bcm Driver Private Data Structure - * @psFlash2xReadWrite :-Flash2x Read/write structure pointer - * - * Return values:-Return TRUE is request is valid else false. - */ - -int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_readwrite *psFlash2xReadWrite) -{ - unsigned int uiNumOfBytes = 0; - unsigned int uiSectStartOffset = 0; - unsigned int uiSectEndOffset = 0; - - uiNumOfBytes = psFlash2xReadWrite->numOfBytes; - - if (IsSectionExistInFlash(Adapter, psFlash2xReadWrite->Section) != TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exist in Flash", psFlash2xReadWrite->Section); - return false; - } - uiSectStartOffset = BcmGetSectionValStartOffset(Adapter, psFlash2xReadWrite->Section); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Start offset :%x ,section :%d\n", uiSectStartOffset, psFlash2xReadWrite->Section); - if ((psFlash2xReadWrite->Section == ISO_IMAGE1) || (psFlash2xReadWrite->Section == ISO_IMAGE2)) { - if (psFlash2xReadWrite->Section == ISO_IMAGE1) { - uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1) - - BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1) + - BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1_PART2) - - BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1_PART2) + - BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1_PART3) - - BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1_PART3); - } else if (psFlash2xReadWrite->Section == ISO_IMAGE2) { - uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2) - - BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2) + - BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2_PART2) - - BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2_PART2) + - BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2_PART3) - - BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2_PART3); - } - - /* since this uiSectEndoffset is the size of iso Image. hence for calculating the virtual endoffset - * it should be added in startoffset. so that check done in last of this function can be valued. 
- */ - uiSectEndOffset = uiSectStartOffset + uiSectEndOffset; - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Total size of the ISO Image :%x", uiSectEndOffset); - } else - uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, psFlash2xReadWrite->Section); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "End offset :%x\n", uiSectEndOffset); - - /* psFlash2xReadWrite->offset and uiNumOfBytes are user controlled and can lead to integer overflows */ - if (psFlash2xReadWrite->offset > uiSectEndOffset) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request...."); - return false; - } - if (uiNumOfBytes > uiSectEndOffset) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request...."); - return false; - } - /* Checking the boundary condition */ - if ((uiSectStartOffset + psFlash2xReadWrite->offset + uiNumOfBytes) <= uiSectEndOffset) - return TRUE; - else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request...."); - return false; - } -} - -/* - * IsFlash2x :- check for Flash 2.x - * Adapater :- Bcm Driver Private Data Structure - * - * Return value:- - * return TRUE if flah2.x of hgher version else return false. - */ - -int IsFlash2x(struct bcm_mini_adapter *Adapter) -{ - if (Adapter->uiFlashLayoutMajorVersion >= FLASH_2X_MAJOR_NUMBER) - return TRUE; - else - return false; -} - -/* - * GetFlashBaseAddr :- Calculate the Flash Base address - * @Adapater :- Bcm Driver Private Data Structure - * - * Return Value:- - * Success :- Base Address of the Flash - */ - -static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter) -{ - unsigned int uiBaseAddr = 0; - - if (Adapter->bDDRInitDone) { - /* - * For All Valid Flash Versions... except 1.1, take the value from FlashBaseAddr - * In case of Raw Read... use the default value - */ - if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == false) && - !((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1))) - uiBaseAddr = Adapter->uiFlashBaseAdd; - else - uiBaseAddr = FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT; - } else { - /* - * For All Valid Flash Versions... except 1.1, take the value from FlashBaseAddr - * In case of Raw Read... use the default value - */ - if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == false) && - !((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1))) - uiBaseAddr = Adapter->uiFlashBaseAdd | FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT; - else - uiBaseAddr = FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT; - } - - return uiBaseAddr; -} - -/* - * BcmCopySection :- This API is used to copy the One section in another. Both section should - * be contiuous and of same size. Hence this Will not be applicabe to copy ISO. - * - * @Adapater :- Bcm Driver Private Data Structure - * @SrcSection :- Source section From where data has to be copied - * @DstSection :- Destination section to which data has to be copied - * @offset :- Offset from/to where data has to be copied from one section to another. - * @numOfBytes :- number of byes that has to be copyed from one section to another at given offset. - * in case of numofBytes equal zero complete section will be copied. 
- * Return Values- - * Success : Return STATUS_SUCCESS - * Faillure :- return negative error code - */ - -int BcmCopySection(struct bcm_mini_adapter *Adapter, - enum bcm_flash2x_section_val SrcSection, - enum bcm_flash2x_section_val DstSection, - unsigned int offset, - unsigned int numOfBytes) -{ - unsigned int BuffSize = 0; - unsigned int BytesToBeCopied = 0; - PUCHAR pBuff = NULL; - int Status = STATUS_SUCCESS; - - if (SrcSection == DstSection) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source and Destination should be different ...try again"); - return -EINVAL; - } - - if ((SrcSection != DSD0) && (SrcSection != DSD1) && (SrcSection != DSD2)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source should be DSD subsection"); - return -EINVAL; - } - - if ((DstSection != DSD0) && (DstSection != DSD1) && (DstSection != DSD2)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destination should be DSD subsection"); - return -EINVAL; - } - - /* if offset zero means have to copy complete secton */ - if (numOfBytes == 0) { - numOfBytes = BcmGetSectionValEndOffset(Adapter, SrcSection) - - BcmGetSectionValStartOffset(Adapter, SrcSection); - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Section Size :0x%x", numOfBytes); - } - - if ((offset + numOfBytes) > BcmGetSectionValEndOffset(Adapter, SrcSection) - - BcmGetSectionValStartOffset(Adapter, SrcSection)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, " Input parameters going beyond the section offS: %x numB: %x of Source Section\n", - offset, numOfBytes); - return -EINVAL; - } - - if ((offset + numOfBytes) > BcmGetSectionValEndOffset(Adapter, DstSection) - - BcmGetSectionValStartOffset(Adapter, DstSection)) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Input parameters going beyond the section offS: %x numB: %x of Destination Section\n", - offset, numOfBytes); - return -EINVAL; - } - - if (numOfBytes > Adapter->uiSectorSize) - BuffSize = Adapter->uiSectorSize; - else - BuffSize = numOfBytes; - - pBuff = kzalloc(BuffSize, GFP_KERNEL); - if (!pBuff) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed.. "); - return -ENOMEM; - } - - BytesToBeCopied = Adapter->uiSectorSize; - if (offset % Adapter->uiSectorSize) - BytesToBeCopied = Adapter->uiSectorSize - (offset % Adapter->uiSectorSize); - if (BytesToBeCopied > numOfBytes) - BytesToBeCopied = numOfBytes; - - Adapter->bHeaderChangeAllowed = TRUE; - - do { - Status = BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, SrcSection , offset, BytesToBeCopied); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed at offset :%d for NOB :%d", SrcSection, BytesToBeCopied); - break; - } - Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pBuff, DstSection, offset, BytesToBeCopied, false); - if (Status) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed at offset :%d for NOB :%d", DstSection, BytesToBeCopied); - break; - } - offset = offset + BytesToBeCopied; - numOfBytes = numOfBytes - BytesToBeCopied; - if (numOfBytes) { - if (numOfBytes > Adapter->uiSectorSize) - BytesToBeCopied = Adapter->uiSectorSize; - else - BytesToBeCopied = numOfBytes; - } - } while (numOfBytes > 0); - - kfree(pBuff); - Adapter->bHeaderChangeAllowed = false; - - return Status; -} - -/* - * SaveHeaderIfPresent :- This API is use to Protect the Header in case of Header Sector write - * @Adapater :- Bcm Driver Private Data Structure - * @pBuff :- Data buffer that has to be written in sector having the header map. 
- * @uiOffset :- Flash offset that has to be written. - * - * Return value :- - * Success :- On success return STATUS_SUCCESS - * Faillure :- Return negative error code - */ - -static int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned int uiOffset) -{ - unsigned int offsetToProtect = 0, HeaderSizeToProtect = 0; - bool bHasHeader = false; - PUCHAR pTempBuff = NULL; - unsigned int uiSectAlignAddr = 0; - unsigned int sig = 0; - - /* making the offset sector aligned */ - uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1); - - if ((uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD2) - Adapter->uiSectorSize) || - (uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD1) - Adapter->uiSectorSize) || - (uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD0) - Adapter->uiSectorSize)) { - /* offset from the sector boundary having the header map */ - offsetToProtect = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader % Adapter->uiSectorSize; - HeaderSizeToProtect = sizeof(struct bcm_dsd_header); - bHasHeader = TRUE; - } - - if (uiSectAlignAddr == BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1) || - uiSectAlignAddr == BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2)) { - offsetToProtect = 0; - HeaderSizeToProtect = sizeof(struct bcm_iso_header); - bHasHeader = TRUE; - } - /* If Header is present overwrite passed buffer with this */ - if (bHasHeader && (Adapter->bHeaderChangeAllowed == false)) { - pTempBuff = kzalloc(HeaderSizeToProtect, GFP_KERNEL); - if (!pTempBuff) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed"); - return -ENOMEM; - } - /* Read header */ - BeceemFlashBulkRead(Adapter, (PUINT)pTempBuff, (uiSectAlignAddr + offsetToProtect), HeaderSizeToProtect); - BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pTempBuff, HeaderSizeToProtect); - /* Replace Buffer content with Header */ - memcpy(pBuff + offsetToProtect, pTempBuff, HeaderSizeToProtect); - - kfree(pTempBuff); - } - if (bHasHeader && Adapter->bSigCorrupted) { - sig = *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber))); - sig = ntohl(sig); - if ((sig & 0xFF000000) != CORRUPTED_PATTERN) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Desired pattern is not at sig offset. Hence won't restore"); - Adapter->bSigCorrupted = false; - return STATUS_SUCCESS; - } - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " Corrupted sig is :%X", sig); - *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber))) = htonl(DSD_IMAGE_MAGIC_NUMBER); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature in Header Write only"); - Adapter->bSigCorrupted = false; - } - - return STATUS_SUCCESS; -} - -/* - * BcmDoChipSelect : This will selcet the appropriate chip for writing. - * @Adapater :- Bcm Driver Private Data Structure - * - * OutPut:- - * Select the Appropriate chip and retrn status Success - */ -static int BcmDoChipSelect(struct bcm_mini_adapter *Adapter, unsigned int offset) -{ - unsigned int FlashConfig = 0; - int ChipNum = 0; - unsigned int GPIOConfig = 0; - unsigned int PartNum = 0; - - ChipNum = offset / FLASH_PART_SIZE; - - /* - * Chip Select mapping to enable flash0. - * To select flash 0, we have to OR with (0<<12). - * ORing 0 will have no impact so not doing that part. 
- * In future if Chip select value changes from 0 to non zero, - * That needs be taken care with backward comaptibility. No worries for now. - */ - - /* - * SelectedChip Variable is the selection that the host is 100% Sure the same as what the register will hold. This can be ONLY ensured - * if the Chip doesn't goes to low power mode while the flash operation is in progress (NVMRdmWrmLock is taken) - * Before every new Flash Write operation, we reset the variable. This is to ensure that after any wake-up from - * power down modes (Idle mode/shutdown mode), the values in the register will be different. - */ - - if (Adapter->SelectedChip == ChipNum) - return STATUS_SUCCESS; - - /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Selected Chip :%x", ChipNum); */ - Adapter->SelectedChip = ChipNum; - - /* bit[13..12] will select the appropriate chip */ - rdmalt(Adapter, FLASH_CONFIG_REG, &FlashConfig, 4); - rdmalt(Adapter, FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4); - { - switch (ChipNum) { - case 0: - PartNum = 0; - break; - case 1: - PartNum = 3; - GPIOConfig |= (0x4 << CHIP_SELECT_BIT12); - break; - case 2: - PartNum = 1; - GPIOConfig |= (0x1 << CHIP_SELECT_BIT12); - break; - case 3: - PartNum = 2; - GPIOConfig |= (0x2 << CHIP_SELECT_BIT12); - break; - } - } - /* In case the bits already written in the FLASH_CONFIG_REG is same as what the user desired, - * nothing to do... can return immediately. - * ASSUMPTION: FLASH_GPIO_CONFIG_REG will be in sync with FLASH_CONFIG_REG. - * Even if the chip goes to low power mode, it should wake with values in each register in sync with each other. - * These values are not written by host other than during CHIP_SELECT. - */ - if (PartNum == ((FlashConfig >> CHIP_SELECT_BIT12) & 0x3)) - return STATUS_SUCCESS; - - /* clearing the bit[13..12] */ - FlashConfig &= 0xFFFFCFFF; - FlashConfig = (FlashConfig | (PartNum<psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber), - SIGNATURE_SIZE); - - uiDSDsig = ntohl(uiDSDsig); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD SIG :%x", uiDSDsig); - - return uiDSDsig; -} - -static int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd) -{ - /* unsigned int priOffsetInMap = 0 ; */ - unsigned int uiDSDPri = STATUS_FAILURE; - /* struct bcm_dsd_header dsdHeader = {0}; - * priOffsetInMap = (PUCHAR)&(dsdHeader.DSDImagePriority) -(PUCHAR)&dsdHeader; - */ - if (IsSectionWritable(Adapter, dsd)) { - if (ReadDSDSignature(Adapter, dsd) == DSD_IMAGE_MAGIC_NUMBER) { - BcmFlash2xBulkRead(Adapter, - &uiDSDPri, - dsd, - Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority), - 4); - - uiDSDPri = ntohl(uiDSDPri); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD<%x> Priority :%x", dsd, uiDSDPri); - } - } - - return uiDSDPri; -} - -static enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter) -{ - int DSDHighestPri = STATUS_FAILURE; - int DsdPri = 0; - enum bcm_flash2x_section_val HighestPriDSD = 0; - - if (IsSectionWritable(Adapter, DSD2)) { - DSDHighestPri = ReadDSDPriority(Adapter, DSD2); - HighestPriDSD = DSD2; - } - - if (IsSectionWritable(Adapter, DSD1)) { - DsdPri = ReadDSDPriority(Adapter, DSD1); - if (DSDHighestPri < DsdPri) { - DSDHighestPri = DsdPri; - HighestPriDSD = DSD1; - } - } - - if (IsSectionWritable(Adapter, DSD0)) { - DsdPri = ReadDSDPriority(Adapter, DSD0); - if (DSDHighestPri < 
DsdPri) { - DSDHighestPri = DsdPri; - HighestPriDSD = DSD0; - } - } - if (HighestPriDSD) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Highest DSD :%x , and its Pri :%x", HighestPriDSD, DSDHighestPri); - - return HighestPriDSD; -} - -static int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso) -{ - unsigned int uiISOsig = 0; - /* unsigned int sigoffsetInMap = 0; - * struct bcm_iso_header ISOHeader = {0}; - * sigoffsetInMap =(PUCHAR)&(ISOHeader.ISOImageMagicNumber) -(PUCHAR)&ISOHeader; - */ - if (iso != ISO_IMAGE1 && iso != ISO_IMAGE2) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "passed section value is not for ISOs"); - return STATUS_FAILURE; - } - BcmFlash2xBulkRead(Adapter, - &uiISOsig, - iso, - 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber), - SIGNATURE_SIZE); - - uiISOsig = ntohl(uiISOsig); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO SIG :%x", uiISOsig); - - return uiISOsig; -} - -static int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso) -{ - unsigned int ISOPri = STATUS_FAILURE; - - if (IsSectionWritable(Adapter, iso)) { - if (ReadISOSignature(Adapter, iso) == ISO_IMAGE_MAGIC_NUMBER) { - BcmFlash2xBulkRead(Adapter, - &ISOPri, - iso, - 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority), - 4); - - ISOPri = ntohl(ISOPri); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO<%x> Priority :%x", iso, ISOPri); - } - } - - return ISOPri; -} - -static enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter) -{ - int ISOHighestPri = STATUS_FAILURE; - int ISOPri = 0; - enum bcm_flash2x_section_val HighestPriISO = NO_SECTION_VAL; - - if (IsSectionWritable(Adapter, ISO_IMAGE2)) { - ISOHighestPri = ReadISOPriority(Adapter, ISO_IMAGE2); - HighestPriISO = ISO_IMAGE2; - } - - if (IsSectionWritable(Adapter, ISO_IMAGE1)) { - ISOPri = ReadISOPriority(Adapter, ISO_IMAGE1); - if (ISOHighestPri < ISOPri) { - ISOHighestPri = ISOPri; - HighestPriISO = ISO_IMAGE1; - } - } - if (HighestPriISO) - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Highest ISO :%x and its Pri :%x", HighestPriISO, ISOHighestPri); - - return HighestPriISO; -} - -static int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter, - PUINT pBuff, - enum bcm_flash2x_section_val eFlash2xSectionVal, - unsigned int uiOffset, - unsigned int uiNumBytes) -{ - #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS) - unsigned int uiTemp = 0, value = 0; - unsigned int i = 0; - unsigned int uiPartOffset = 0; - #endif - unsigned int uiStartOffset = 0; - /* Adding section start address */ - int Status = STATUS_SUCCESS; - PUCHAR pcBuff = (PUCHAR)pBuff; - - if (uiNumBytes % Adapter->ulFlashWriteSize) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Writing without Sector Erase for non-FlashWriteSize number of bytes 0x%x\n", uiNumBytes); - return STATUS_FAILURE; - } - - uiStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal); - - if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal)) - return vendorextnWriteSectionWithoutErase(Adapter, pcBuff, eFlash2xSectionVal, uiOffset, uiNumBytes); - - uiOffset = uiOffset + uiStartOffset; - - #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS) - Status = bcmflash_raw_writenoerase((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), pcBuff, uiNumBytes); - #else - rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - value = 0; - 
wrmalt(Adapter, 0x0f000C80, &value, sizeof(value)); - - Adapter->SelectedChip = RESET_CHIP_SELECT; - BcmDoChipSelect(Adapter, uiOffset); - uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter); - - for (i = 0; i < uiNumBytes; i += Adapter->ulFlashWriteSize) { - if (Adapter->ulFlashWriteSize == BYTE_WRITE_SUPPORT) - Status = flashByteWrite(Adapter, uiPartOffset, pcBuff); - else - Status = flashWrite(Adapter, uiPartOffset, pcBuff); - - if (Status != STATUS_SUCCESS) - break; - - pcBuff = pcBuff + Adapter->ulFlashWriteSize; - uiPartOffset = uiPartOffset + Adapter->ulFlashWriteSize; - } - wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp)); - Adapter->SelectedChip = RESET_CHIP_SELECT; - #endif - - return Status; -} - -bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section) -{ - bool SectionPresent = false; - - switch (section) { - case ISO_IMAGE1: - if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start != UNINIT_PTR_IN_CS) && - (IsNonCDLessDevice(Adapter) == false)) - SectionPresent = TRUE; - break; - case ISO_IMAGE2: - if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start != UNINIT_PTR_IN_CS) && - (IsNonCDLessDevice(Adapter) == false)) - SectionPresent = TRUE; - break; - case DSD0: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart != UNINIT_PTR_IN_CS) - SectionPresent = TRUE; - break; - case DSD1: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start != UNINIT_PTR_IN_CS) - SectionPresent = TRUE; - break; - case DSD2: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start != UNINIT_PTR_IN_CS) - SectionPresent = TRUE; - break; - case VSA0: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart != UNINIT_PTR_IN_CS) - SectionPresent = TRUE; - break; - case VSA1: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start != UNINIT_PTR_IN_CS) - SectionPresent = TRUE; - break; - case VSA2: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start != UNINIT_PTR_IN_CS) - SectionPresent = TRUE; - break; - case SCSI: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS) - SectionPresent = TRUE; - break; - case CONTROL_SECTION: - if (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart != UNINIT_PTR_IN_CS) - SectionPresent = TRUE; - break; - default: - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Does not exist in Flash 2.x"); - SectionPresent = false; - } - - return SectionPresent; -} - -static int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val Section) -{ - int offset = STATUS_FAILURE; - int Status = false; - - if (IsSectionExistInFlash(Adapter, Section) == false) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exist", Section); - return false; - } - - offset = BcmGetSectionValStartOffset(Adapter, Section); - if (offset == INVALID_OFFSET) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exist", Section); - return false; - } - - if (IsSectionExistInVendorInfo(Adapter, Section)) - return !(Adapter->psFlash2xVendorInfo->VendorSection[Section].AccessFlags & FLASH2X_SECTION_RO); - - Status = IsOffsetWritable(Adapter, offset); - return Status; -} - -static int CorruptDSDSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal) -{ - PUCHAR pBuff = NULL; - unsigned int sig = 0; - unsigned int uiOffset = 0; - unsigned int BlockStatus = 0; - unsigned int uiSectAlignAddr = 0; - - Adapter->bSigCorrupted = false; - if (Adapter->bAllDSDWriteAllow == false) { - 
if (IsSectionWritable(Adapter, eFlash2xSectionVal) != TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Corrupt signature"); - return SECTOR_IS_NOT_WRITABLE; - } - } - - pBuff = kzalloc(MAX_RW_SIZE, GFP_KERNEL); - if (!pBuff) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Can't allocate memorey"); - return -ENOMEM; - } - - uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header); - uiOffset -= MAX_RW_SIZE; - - BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, eFlash2xSectionVal, uiOffset, MAX_RW_SIZE); - - sig = *((PUINT)(pBuff + 12)); - sig = ntohl(sig); - BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pBuff, MAX_RW_SIZE); - /* Now corrupting the sig by corrupting 4th last Byte. */ - *(pBuff + 12) = 0; - - if (sig == DSD_IMAGE_MAGIC_NUMBER) { - Adapter->bSigCorrupted = TRUE; - if (Adapter->ulFlashWriteSize == BYTE_WRITE_SUPPORT) { - uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1); - BlockStatus = BcmFlashUnProtectBlock(Adapter, uiSectAlignAddr, Adapter->uiSectorSize); - - WriteToFlashWithoutSectorErase(Adapter, (PUINT)(pBuff + 12), eFlash2xSectionVal, - (uiOffset + 12), BYTE_WRITE_SUPPORT); - if (BlockStatus) { - BcmRestoreBlockProtectStatus(Adapter, BlockStatus); - BlockStatus = 0; - } - } else { - WriteToFlashWithoutSectorErase(Adapter, (PUINT)pBuff, eFlash2xSectionVal, - uiOffset, MAX_RW_SIZE); - } - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "BCM Signature is not present in header"); - kfree(pBuff); - - return STATUS_FAILURE; - } - - kfree(pBuff); - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Corrupted the signature"); - - return STATUS_SUCCESS; -} - -static int CorruptISOSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal) -{ - PUCHAR pBuff = NULL; - unsigned int sig = 0; - unsigned int uiOffset = 0; - - Adapter->bSigCorrupted = false; - - if (IsSectionWritable(Adapter, eFlash2xSectionVal) != TRUE) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Corrupt signature"); - return SECTOR_IS_NOT_WRITABLE; - } - - pBuff = kzalloc(MAX_RW_SIZE, GFP_KERNEL); - if (!pBuff) { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Can't allocate memorey"); - return -ENOMEM; - } - - uiOffset = 0; - - BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, eFlash2xSectionVal, uiOffset, MAX_RW_SIZE); - - sig = *((PUINT)pBuff); - sig = ntohl(sig); - - /* corrupt signature */ - *pBuff = 0; - - if (sig == ISO_IMAGE_MAGIC_NUMBER) { - Adapter->bSigCorrupted = TRUE; - WriteToFlashWithoutSectorErase(Adapter, (PUINT)pBuff, eFlash2xSectionVal, - uiOffset, Adapter->ulFlashWriteSize); - } else { - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "BCM Signature is not present in header"); - kfree(pBuff); - - return STATUS_FAILURE; - } - - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Corrupted the signature"); - BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pBuff, MAX_RW_SIZE); - - kfree(pBuff); - return STATUS_SUCCESS; -} - -bool IsNonCDLessDevice(struct bcm_mini_adapter *Adapter) -{ - if (Adapter->psFlash2xCSInfo->IsCDLessDeviceBootSig == NON_CDLESS_DEVICE_BOOT_SIG) - return TRUE; - else - return false; -} diff --git a/drivers/staging/bcm/nvm.h b/drivers/staging/bcm/nvm.h deleted file mode 100644 index e765cca5d966..000000000000 --- a/drivers/staging/bcm/nvm.h +++ /dev/null @@ -1,286 +0,0 @@ 
-/*************************************************************************************** - * - * Copyright (c) Beceem Communications Inc. - * - * Module Name: - * NVM.h - * - * Abstract: - * This file has the prototypes,preprocessors and definitions various NVM libraries. - * - * - * Revision History: - * Who When What - * -------- -------- ---------------------------------------------- - * Name Date Created/reviewed/modified - * - * Notes: - * - ****************************************************************************************/ - -#ifndef _NVM_H_ -#define _NVM_H_ - -struct bcm_flash_cs_info { - u32 MagicNumber; - /* let the magic number be 0xBECE-F1A5 - F1A5 for "flas-h" */ - u32 FlashLayoutVersion; - u32 ISOImageVersion; - u32 SCSIFirmwareVersion; - u32 OffsetFromZeroForPart1ISOImage; - u32 OffsetFromZeroForScsiFirmware; - u32 SizeOfScsiFirmware; - u32 OffsetFromZeroForPart2ISOImage; - u32 OffsetFromZeroForCalibrationStart; - u32 OffsetFromZeroForCalibrationEnd; - u32 OffsetFromZeroForVSAStart; - u32 OffsetFromZeroForVSAEnd; - u32 OffsetFromZeroForControlSectionStart; - u32 OffsetFromZeroForControlSectionData; - u32 CDLessInactivityTimeout; - u32 NewImageSignature; - u32 FlashSectorSizeSig; - u32 FlashSectorSize; - u32 FlashWriteSupportSize; - u32 TotalFlashSize; - u32 FlashBaseAddr; - u32 FlashPartMaxSize; - u32 IsCDLessDeviceBootSig; - /* MSC Timeout after reset to switch from MSC to NW Mode */ - u32 MassStorageTimeout; -}; - -#define FLASH2X_TOTAL_SIZE (64 * 1024 * 1024) -#define DEFAULT_SECTOR_SIZE (64 * 1024) - -struct bcm_flash2x_cs_info { - /* magic number as 0xBECE-F1A5 - F1A5 for "flas-h" */ - u32 MagicNumber; - u32 FlashLayoutVersion; - u32 ISOImageVersion; - u32 SCSIFirmwareVersion; - u32 OffsetFromZeroForPart1ISOImage; - u32 OffsetFromZeroForScsiFirmware; - u32 SizeOfScsiFirmware; - u32 OffsetFromZeroForPart2ISOImage; - u32 OffsetFromZeroForDSDStart; - u32 OffsetFromZeroForDSDEnd; - u32 OffsetFromZeroForVSAStart; - u32 OffsetFromZeroForVSAEnd; - u32 OffsetFromZeroForControlSectionStart; - u32 OffsetFromZeroForControlSectionData; - /* NO Data Activity timeout to switch from MSC to NW Mode */ - u32 CDLessInactivityTimeout; - u32 NewImageSignature; - u32 FlashSectorSizeSig; - u32 FlashSectorSize; - u32 FlashWriteSupportSize; - u32 TotalFlashSize; - u32 FlashBaseAddr; - u32 FlashPartMaxSize; - u32 IsCDLessDeviceBootSig; - /* MSC Timeout after reset to switch from MSC to NW Mode */ - u32 MassStorageTimeout; - /* Flash Map 2.0 Field */ - u32 OffsetISOImage1Part1Start; - u32 OffsetISOImage1Part1End; - u32 OffsetISOImage1Part2Start; - u32 OffsetISOImage1Part2End; - u32 OffsetISOImage1Part3Start; - u32 OffsetISOImage1Part3End; - u32 OffsetISOImage2Part1Start; - u32 OffsetISOImage2Part1End; - u32 OffsetISOImage2Part2Start; - u32 OffsetISOImage2Part2End; - u32 OffsetISOImage2Part3Start; - u32 OffsetISOImage2Part3End; - /* DSD Header offset from start of DSD */ - u32 OffsetFromDSDStartForDSDHeader; - u32 OffsetFromZeroForDSD1Start; - u32 OffsetFromZeroForDSD1End; - u32 OffsetFromZeroForDSD2Start; - u32 OffsetFromZeroForDSD2End; - u32 OffsetFromZeroForVSA1Start; - u32 OffsetFromZeroForVSA1End; - u32 OffsetFromZeroForVSA2Start; - u32 OffsetFromZeroForVSA2End; - /* - * ACCESS_BITS_PER_SECTOR 2 - * ACCESS_RW 0 - * ACCESS_RO 1 - * ACCESS_RESVD 2 - * ACCESS_RESVD 3 - */ - u32 SectorAccessBitMap[FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)]; - /* All expansions to the control data structure should add here */ -}; - -struct bcm_vendor_section_info { - u32 OffsetFromZeroForSectionStart; 
- u32 OffsetFromZeroForSectionEnd; - u32 AccessFlags; - u32 Reserved[16]; -}; - -struct bcm_flash2x_vendor_info { - struct bcm_vendor_section_info VendorSection[TOTAL_SECTIONS]; - u32 Reserved[16]; -}; - -struct bcm_dsd_header { - u32 DSDImageSize; - u32 DSDImageCRC; - u32 DSDImagePriority; - /* We should not consider right now. Reading reserve is worthless. */ - u32 Reserved[252]; /* Resvd for DSD Header */ - u32 DSDImageMagicNumber; -}; - -struct bcm_iso_header { - u32 ISOImageMagicNumber; - u32 ISOImageSize; - u32 ISOImageCRC; - u32 ISOImagePriority; - /* We should not consider right now. Reading reserve is worthless. */ - u32 Reserved[60]; /* Resvd for ISO Header extension */ -}; - -#define EEPROM_BEGIN_CIS (0) -#define EEPROM_BEGIN_NON_CIS (0x200) -#define EEPROM_END (0x2000) -#define INIT_PARAMS_SIGNATURE (0x95a7a597) -#define MAX_INIT_PARAMS_LENGTH (2048) -#define MAC_ADDRESS_OFFSET 0x200 - -#define INIT_PARAMS_1_SIGNATURE_ADDRESS EEPROM_BEGIN_NON_CIS -#define INIT_PARAMS_1_DATA_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+16) -#define INIT_PARAMS_1_MACADDRESS_ADDRESS (MAC_ADDRESS_OFFSET) -#define INIT_PARAMS_1_LENGTH_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+4) - -#define INIT_PARAMS_2_SIGNATURE_ADDRESS (EEPROM_BEGIN_NON_CIS + 2048 + 16) -#define INIT_PARAMS_2_DATA_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 16) -#define INIT_PARAMS_2_MACADDRESS_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 8) -#define INIT_PARAMS_2_LENGTH_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 4) - -#define EEPROM_SPI_DEV_CONFIG_REG 0x0F003000 -#define EEPROM_SPI_Q_STATUS1_REG 0x0F003004 -#define EEPROM_SPI_Q_STATUS1_MASK_REG 0x0F00300C - -#define EEPROM_SPI_Q_STATUS_REG 0x0F003008 -#define EEPROM_CMDQ_SPI_REG 0x0F003018 -#define EEPROM_WRITE_DATAQ_REG 0x0F00301C -#define EEPROM_READ_DATAQ_REG 0x0F003020 -#define SPI_FLUSH_REG 0x0F00304C - -#define EEPROM_WRITE_ENABLE 0x06000000 -#define EEPROM_READ_STATUS_REGISTER 0x05000000 -#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000 -#define EEPROM_WRITE_QUEUE_EMPTY 0x00001000 -#define EEPROM_WRITE_QUEUE_AVAIL 0x00002000 -#define EEPROM_WRITE_QUEUE_FULL 0x00004000 -#define EEPROM_16_BYTE_PAGE_READ 0xFB000000 -#define EEPROM_4_BYTE_PAGE_READ 0x3B000000 - -#define EEPROM_CMD_QUEUE_FLUSH 0x00000001 -#define EEPROM_WRITE_QUEUE_FLUSH 0x00000002 -#define EEPROM_READ_QUEUE_FLUSH 0x00000004 -#define EEPROM_ETH_QUEUE_FLUSH 0x00000008 -#define EEPROM_ALL_QUEUE_FLUSH 0x0000000f -#define EEPROM_READ_ENABLE 0x06000000 -#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000 -#define EEPROM_READ_DATA_FULL 0x00000010 -#define EEPROM_READ_DATA_AVAIL 0x00000020 -#define EEPROM_READ_QUEUE_EMPTY 0x00000002 -#define EEPROM_CMD_QUEUE_EMPTY 0x00000100 -#define EEPROM_CMD_QUEUE_AVAIL 0x00000200 -#define EEPROM_CMD_QUEUE_FULL 0x00000400 - -/* Most EEPROM status register bit 0 indicates if the EEPROM is busy - * with a write if set 1. See the details of the EEPROM Status Register - * in the EEPROM data sheet. 
- */ -#define EEPROM_STATUS_REG_WRITE_BUSY 0x00000001 - -/* We will have 1 mSec for every RETRIES_PER_DELAY count and have a max attempts of MAX_EEPROM_RETRIES - * This will give us 80 mSec minimum of delay = 80mSecs - */ -#define MAX_EEPROM_RETRIES 80 -#define RETRIES_PER_DELAY 64 -#define MAX_RW_SIZE 0x10 -#define MAX_READ_SIZE 0x10 -#define MAX_SECTOR_SIZE (512 * 1024) -#define MIN_SECTOR_SIZE (1024) -#define FLASH_SECTOR_SIZE_OFFSET 0xEFFFC -#define FLASH_SECTOR_SIZE_SIG_OFFSET 0xEFFF8 -#define FLASH_SECTOR_SIZE_SIG 0xCAFEBABE -#define FLASH_CS_INFO_START_ADDR 0xFF0000 -#define FLASH_CONTROL_STRUCT_SIGNATURE 0xBECEF1A5 -#define SCSI_FIRMWARE_MAJOR_VERSION 0x1 -#define SCSI_FIRMWARE_MINOR_VERSION 0x5 -#define BYTE_WRITE_SUPPORT 0x1 -#define FLASH_AUTO_INIT_BASE_ADDR 0xF00000 -#define FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT 0x1C000000 -#define FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT 0x1F000000 -#define FLASH_CONTIGIOUS_START_ADDR_BCS350 0x08000000 -#define FLASH_CONTIGIOUS_END_ADDR_BCS350 0x08FFFFFF -#define FLASH_SIZE_ADDR 0xFFFFEC -#define FLASH_SPI_CMDQ_REG 0xAF003040 -#define FLASH_SPI_WRITEQ_REG 0xAF003044 -#define FLASH_SPI_READQ_REG 0xAF003048 -#define FLASH_CONFIG_REG 0xAF003050 -#define FLASH_GPIO_CONFIG_REG 0xAF000030 -#define FLASH_CMD_WRITE_ENABLE 0x06 -#define FLASH_CMD_READ_ENABLE 0x03 -#define FLASH_CMD_RESET_WRITE_ENABLE 0x04 -#define FLASH_CMD_STATUS_REG_READ 0x05 -#define FLASH_CMD_STATUS_REG_WRITE 0x01 -#define FLASH_CMD_READ_ID 0x9F -#define PAD_SELECT_REGISTER 0xAF000410 -#define FLASH_PART_SST25VF080B 0xBF258E -#define EEPROM_CAL_DATA_INTERNAL_LOC 0xbFB00008 -#define EEPROM_CALPARAM_START 0x200 -#define EEPROM_SIZE_OFFSET 524 - -/* As Read/Write time vaires from 1.5 to 3.0 ms. - * so After Ignoring the rdm/wrm time(that is dependent on many factor like interface etc.), - * here time calculated meets the worst case delay, 3.0 ms - */ -#define MAX_FLASH_RETRIES 4 -#define FLASH_PER_RETRIES_DELAY 16 -#define EEPROM_MAX_CAL_AREA_SIZE 0xF0000 -#define BECM ntohl(0x4245434d) -#define FLASH_2X_MAJOR_NUMBER 0x2 -#define DSD_IMAGE_MAGIC_NUMBER 0xBECE0D5D -#define ISO_IMAGE_MAGIC_NUMBER 0xBECE0150 -#define NON_CDLESS_DEVICE_BOOT_SIG 0xBECEB007 - -#define MINOR_VERSION(x) ((x >> 16) & 0xFFFF) -#define MAJOR_VERSION(x) (x & 0xFFFF) - -#define CORRUPTED_PATTERN 0x0 -#define UNINIT_PTR_IN_CS 0xBBBBDDDD -#define VENDOR_PTR_IN_CS 0xAAAACCCC -#define FLASH2X_SECTION_PRESENT (1 << 0) -#define FLASH2X_SECTION_VALID (1 << 1) -#define FLASH2X_SECTION_RO (1 << 2) -#define FLASH2X_SECTION_ACT (1 << 3) -#define SECTOR_IS_NOT_WRITABLE STATUS_FAILURE -#define INVALID_OFFSET STATUS_FAILURE -#define INVALID_SECTION STATUS_FAILURE -#define SECTOR_1K 1024 -#define SECTOR_64K (64 * SECTOR_1K) -#define SECTOR_128K (2 * SECTOR_64K) -#define SECTOR_256k (2 * SECTOR_128K) -#define SECTOR_512K (2 * SECTOR_256k) -#define FLASH_PART_SIZE (16 * 1024 * 1024) -#define RESET_CHIP_SELECT -1 -#define CHIP_SELECT_BIT12 12 -#define SECTOR_READWRITE_PERMISSION 0 -#define SECTOR_READONLY 1 -#define SIGNATURE_SIZE 4 -#define DEFAULT_BUFF_SIZE 0x10000 - -#define FIELD_OFFSET_IN_HEADER(HeaderPointer, Field) ((u8 *)&((HeaderPointer)(NULL))->Field - (u8 *)(NULL)) - -#endif - diff --git a/drivers/staging/bcm/sort.c b/drivers/staging/bcm/sort.c deleted file mode 100644 index ca0b17991512..000000000000 --- a/drivers/staging/bcm/sort.c +++ /dev/null @@ -1,52 +0,0 @@ -#include "headers.h" -#include - -/* - * File Name: sort.c - * - * Author: Beceem Communications Pvt. 
Ltd - * - * Abstract: This file contains the routines sorting the classification rules. - * - * Copyright (c) 2007 Beceem Communications Pvt. Ltd - */ - -static int compare_packet_info(void const *a, void const *b) -{ - struct bcm_packet_info const *pa = a; - struct bcm_packet_info const *pb = b; - - if (!pa->bValid || !pb->bValid) - return 0; - - return pa->u8TrafficPriority - pb->u8TrafficPriority; -} - -VOID SortPackInfo(struct bcm_mini_adapter *Adapter) -{ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, - DBG_LVL_ALL, "<======="); - - sort(Adapter->PackInfo, NO_OF_QUEUES, sizeof(struct bcm_packet_info), - compare_packet_info, NULL); -} - -static int compare_classifiers(void const *a, void const *b) -{ - struct bcm_classifier_rule const *pa = a; - struct bcm_classifier_rule const *pb = b; - - if (!pa->bUsed || !pb->bUsed) - return 0; - - return pa->u8ClassifierRulePriority - pb->u8ClassifierRulePriority; -} - -VOID SortClassifiers(struct bcm_mini_adapter *Adapter) -{ - BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, - DBG_LVL_ALL, "<======="); - - sort(Adapter->astClassifierTable, MAX_CLASSIFIERS, - sizeof(struct bcm_classifier_rule), compare_classifiers, NULL); -} diff --git a/drivers/staging/bcm/target_params.h b/drivers/staging/bcm/target_params.h deleted file mode 100644 index dc45f9ab854d..000000000000 --- a/drivers/staging/bcm/target_params.h +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef TARGET_PARAMS_H -#define TARGET_PARAMS_H - -struct bcm_target_params { - u32 m_u32CfgVersion; - u32 m_u32CenterFrequency; - u32 m_u32BandAScan; - u32 m_u32BandBScan; - u32 m_u32BandCScan; - u32 m_u32ErtpsOptions; - u32 m_u32PHSEnable; - u32 m_u32HoEnable; - u32 m_u32HoReserved1; - u32 m_u32HoReserved2; - u32 m_u32MimoEnable; - u32 m_u32SecurityEnable; - u32 m_u32PowerSavingModesEnable; /* bit 1: 1 Idlemode enable; bit2: 1 Sleepmode Enable */ - /* PowerSaving Mode Options: - * bit 0 = 1: CPE mode - to keep pcmcia if alive; - * bit 1 = 1: CINR reporting in Idlemode Msg - * bit 2 = 1: Default PSC Enable in sleepmode - */ - u32 m_u32PowerSavingModeOptions; - u32 m_u32ArqEnable; - /* From Version #3, the HARQ section renamed as general */ - u32 m_u32HarqEnable; - u32 m_u32EEPROMFlag; - /* BINARY TYPE - 4th MSByte: Interface Type - 3rd MSByte: Vendor Type - 2nd MSByte - * Unused - LSByte - */ - u32 m_u32Customize; - u32 m_u32ConfigBW; /* In Hz */ - u32 m_u32ShutDownInitThresholdTimer; - u32 m_u32RadioParameter; - u32 m_u32PhyParameter1; - u32 m_u32PhyParameter2; - u32 m_u32PhyParameter3; - u32 m_u32TestOptions; /* in eval mode only; lower 16bits = basic cid for testing; then bit 16 is test cqich,bit 17 test init rang; bit 18 test periodic rang and bit 19 is test harq ack/nack */ - u32 m_u32MaxMACDataperDLFrame; - u32 m_u32MaxMACDataperULFrame; - u32 m_u32Corr2MacFlags; - u32 HostDrvrConfig1; - u32 HostDrvrConfig2; - u32 HostDrvrConfig3; - u32 HostDrvrConfig4; - u32 HostDrvrConfig5; - u32 HostDrvrConfig6; - u32 m_u32SegmentedPUSCenable; - /* removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files.. - * BAMC Related Parameters - * Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling. - * bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support. 
- */ - u32 m_u32BandAMCEnable; -}; - -#endif diff --git a/drivers/staging/bcm/vendorspecificextn.c b/drivers/staging/bcm/vendorspecificextn.c deleted file mode 100644 index 1d9bef6e4273..000000000000 --- a/drivers/staging/bcm/vendorspecificextn.c +++ /dev/null @@ -1,145 +0,0 @@ -#include "headers.h" -/* - * Procedure: vendorextnGetSectionInfo - * - * Description: Finds the type of NVM used. - * - * Arguments: - * Adapter - ptr to Adapter object instance - * pNVMType - ptr to NVM type. - * Returns: - * STATUS_SUCCESS/STATUS_FAILURE - * - */ -INT vendorextnGetSectionInfo(PVOID pContext, - struct bcm_flash2x_vendor_info *pVendorInfo) -{ - return STATUS_FAILURE; -} - -/* - * Procedure: vendorextnInit - * - * Description: Initializing the vendor extension NVM interface - * - * Arguments: - * Adapter - Pointer to MINI Adapter Structure - * Returns: - * STATUS_SUCCESS/STATUS_FAILURE - * - * - */ -INT vendorextnInit(struct bcm_mini_adapter *Adapter) -{ - return STATUS_SUCCESS; -} - -/* - * Procedure: vendorextnExit - * - * Description: Free the resource associated with vendor extension NVM interface - * - * Arguments: - * - * Returns: - * STATUS_SUCCESS/STATUS_FAILURE - * - * - */ -INT vendorextnExit(struct bcm_mini_adapter *Adapter) -{ - return STATUS_SUCCESS; -} - -/* - * Procedure: vendorextnIoctl - * - * Description: execute the vendor extension specific ioctl - * - * Arguments: - * Adapter -Beceem private Adapter Structure - * cmd -vendor extension specific Ioctl commad - * arg -input parameter sent by vendor - * - * Returns: - * CONTINUE_COMMON_PATH in case it is not meant to be processed - * by vendor ioctls - * STATUS_SUCCESS/STATUS_FAILURE as per the IOCTL return value - */ - -INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg) -{ - return CONTINUE_COMMON_PATH; -} - - - -/* - * Procedure: vendorextnReadSection - * - * Description: Reads from a section of NVM - * - * Arguments: - * pContext - ptr to Adapter object instance - * pBuffer - Read the data from Vendor Area to this buffer - * SectionVal - Value of type of Section - * Offset - Read from the Offset of the Vendor Section. - * numOfBytes - Read numOfBytes from the Vendor section to Buffer - * - * Returns: - * STATUS_SUCCESS/STATUS_FAILURE - */ - -INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, - enum bcm_flash2x_section_val SectionVal, UINT offset, UINT numOfBytes) -{ - return STATUS_FAILURE; -} - - - -/* - * Procedure: vendorextnWriteSection - * - * Description: Write to a Section of NVM - * - * Arguments: - * pContext - ptr to Adapter object instance - * pBuffer - Write the data provided in the buffer - * SectionVal - Value of type of Section - * Offset - Writes to the Offset of the Vendor Section. - * numOfBytes - Write num Bytes after reading from pBuffer. - * bVerify - the Buffer Written should be verified. - * - * Returns: - * STATUS_SUCCESS/STATUS_FAILURE - */ -INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, - enum bcm_flash2x_section_val SectionVal, UINT offset, - UINT numOfBytes, bool bVerify) -{ - return STATUS_FAILURE; -} - - - -/* - * Procedure: vendorextnWriteSectionWithoutErase - * - * Description: Write to a Section of NVM without erasing the sector - * - * Arguments: - * pContext - ptr to Adapter object instance - * pBuffer - Write the data provided in the buffer - * SectionVal - Value of type of Section - * Offset - Writes to the Offset of the Vendor Section. - * numOfBytes - Write num Bytes after reading from pBuffer. 
- * - * Returns: - * STATUS_SUCCESS/STATUS_FAILURE - */ -INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer, - enum bcm_flash2x_section_val SectionVal, UINT offset, UINT numOfBytes) -{ - return STATUS_FAILURE; -} diff --git a/drivers/staging/bcm/vendorspecificextn.h b/drivers/staging/bcm/vendorspecificextn.h deleted file mode 100644 index ff57f0570451..000000000000 --- a/drivers/staging/bcm/vendorspecificextn.h +++ /dev/null @@ -1,18 +0,0 @@ - -#ifndef __VENDOR_EXTN_NVM_H__ -#define __VENDOR_EXTN_NVM_H__ - -#define CONTINUE_COMMON_PATH 0xFFFF - -INT vendorextnGetSectionInfo(PVOID pContext, struct bcm_flash2x_vendor_info *pVendorInfo); -INT vendorextnExit(struct bcm_mini_adapter *Adapter); -INT vendorextnInit(struct bcm_mini_adapter *Adapter); -INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg); -INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal, - UINT offset, UINT numOfBytes); -INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal, - UINT offset, UINT numOfBytes, bool bVerify); -INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal, - UINT offset, UINT numOfBytes); - -#endif /* */ -- cgit v1.2.3-59-g8ed1b From c11f042b8607f38410351fb30c224783f2b24fb7 Mon Sep 17 00:00:00 2001 From: David Cohen Date: Thu, 16 Oct 2014 19:06:59 -0700 Subject: pinctrl: use linux-gpio mailing list The GPIO concepts are close enough to pin control that we may use the same mailing list to discuss them. Signed-off-by: Linus Walleij --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..d7df7913c618 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7153,6 +7153,7 @@ F: drivers/crypto/picoxcell* PIN CONTROL SUBSYSTEM M: Linus Walleij +L: linux-gpio@vger.kernel.org S: Maintained F: drivers/pinctrl/ F: include/linux/pinctrl/ -- cgit v1.2.3-59-g8ed1b From 915f389d9c52c5a2d6946ffc252d68bfd6c5dfee Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Mon, 20 Oct 2014 11:59:43 -0400 Subject: audit: add Paul Moore to the MAINTAINERS entry After a long stint maintaining the audit tree, Eric asked me to step in and handle the day-to-day management of the audit tree. We should also update the linux-audit mailing list entry to better reflect current usage. Signed-off-by: Paul Moore Acked-by: Eric Paris --- MAINTAINERS | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c2066f4c3286..86c24fd147d1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1689,10 +1689,11 @@ S: Supported F: drivers/scsi/esas2r AUDIT SUBSYSTEM +M: Paul Moore M: Eric Paris -L: linux-audit@redhat.com (subscribers-only) +L: linux-audit@redhat.com (moderated for non-subscribers) W: http://people.redhat.com/sgrubb/audit/ -T: git git://git.infradead.org/users/eparis/audit.git +T: git git://git.infradead.org/users/pcmoore/audit S: Maintained F: include/linux/audit.h F: include/uapi/linux/audit.h -- cgit v1.2.3-59-g8ed1b From d1e66e6e45099d3604fb8e3544281520e9fb8958 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Fri, 17 Oct 2014 19:19:59 -0400 Subject: MAINTAINERS: nx-842 driver maintainer change Change maintainer of nx-842 compression coprocessor driver to Dan Streetman. 
Signed-off-by: Dan Streetman Acked-by: Nathan Fontenot Signed-off-by: Michael Ellerman --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0484f2c526f0..d333c3e4abeb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4601,7 +4601,7 @@ S: Supported F: drivers/crypto/nx/ IBM Power 842 compression accelerator -M: Nathan Fontenot +M: Dan Streetman S: Supported F: drivers/crypto/nx/nx-842.c F: include/linux/nx842.h -- cgit v1.2.3-59-g8ed1b From 0ef090151345e693bd9b50c9b7aaf34ae5e9cac3 Mon Sep 17 00:00:00 2001 From: Bo Shen Date: Sun, 28 Sep 2014 09:57:18 +0800 Subject: MAINTAINERS: add atmel ssc driver maintainer entry Signed-off-by: Bo Shen Acked-by: Nicolas Ferre --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..71fdbd4f4ac6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1749,6 +1749,13 @@ M: Nicolas Ferre S: Supported F: drivers/spi/spi-atmel.* +ATMEL SSC DRIVER +M: Bo Shen +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: drivers/misc/atmel-ssc.c +F: include/linux/atmel-ssc.h + ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS M: Nicolas Ferre L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -- cgit v1.2.3-59-g8ed1b From 7c37fbda85ceb9be7bdb9d5f53e702efc40cf783 Mon Sep 17 00:00:00 2001 From: Neil Brown Date: Fri, 24 Oct 2014 00:14:39 +0200 Subject: overlay: overlay filesystem documentation Document the overlay filesystem. Signed-off-by: Miklos Szeredi --- Documentation/filesystems/overlayfs.txt | 198 ++++++++++++++++++++++++++++++++ MAINTAINERS | 7 ++ 2 files changed, 205 insertions(+) create mode 100644 Documentation/filesystems/overlayfs.txt (limited to 'MAINTAINERS') diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt new file mode 100644 index 000000000000..530850a72735 --- /dev/null +++ b/Documentation/filesystems/overlayfs.txt @@ -0,0 +1,198 @@ +Written by: Neil Brown + +Overlay Filesystem +================== + +This document describes a prototype for a new approach to providing +overlay-filesystem functionality in Linux (sometimes referred to as +union-filesystems). An overlay-filesystem tries to present a +filesystem which is the result over overlaying one filesystem on top +of the other. + +The result will inevitably fail to look exactly like a normal +filesystem for various technical reasons. The expectation is that +many use cases will be able to ignore these differences. + +This approach is 'hybrid' because the objects that appear in the +filesystem do not all appear to belong to that filesystem. In many +cases an object accessed in the union will be indistinguishable +from accessing the corresponding object from the original filesystem. +This is most obvious from the 'st_dev' field returned by stat(2). + +While directories will report an st_dev from the overlay-filesystem, +all non-directory objects will report an st_dev from the lower or +upper filesystem that is providing the object. Similarly st_ino will +only be unique when combined with st_dev, and both of these can change +over the lifetime of a non-directory object. Many applications and +tools ignore these values and will not be affected. + +Upper and Lower +--------------- + +An overlay filesystem combines two filesystems - an 'upper' filesystem +and a 'lower' filesystem. 
When a name exists in both filesystems, the +object in the 'upper' filesystem is visible while the object in the +'lower' filesystem is either hidden or, in the case of directories, +merged with the 'upper' object. + +It would be more correct to refer to an upper and lower 'directory +tree' rather than 'filesystem' as it is quite possible for both +directory trees to be in the same filesystem and there is no +requirement that the root of a filesystem be given for either upper or +lower. + +The lower filesystem can be any filesystem supported by Linux and does +not need to be writable. The lower filesystem can even be another +overlayfs. The upper filesystem will normally be writable and if it +is it must support the creation of trusted.* extended attributes, and +must provide valid d_type in readdir responses, so NFS is not suitable. + +A read-only overlay of two read-only filesystems may use any +filesystem type. + +Directories +----------- + +Overlaying mainly involves directories. If a given name appears in both +upper and lower filesystems and refers to a non-directory in either, +then the lower object is hidden - the name refers only to the upper +object. + +Where both upper and lower objects are directories, a merged directory +is formed. + +At mount time, the two directories given as mount options "lowerdir" and +"upperdir" are combined into a merged directory: + + mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper,\ +workdir=/work /merged + +The "workdir" needs to be an empty directory on the same filesystem +as upperdir. + +Then whenever a lookup is requested in such a merged directory, the +lookup is performed in each actual directory and the combined result +is cached in the dentry belonging to the overlay filesystem. If both +actual lookups find directories, both are stored and a merged +directory is created, otherwise only one is stored: the upper if it +exists, else the lower. + +Only the lists of names from directories are merged. Other content +such as metadata and extended attributes are reported for the upper +directory only. These attributes of the lower directory are hidden. + +whiteouts and opaque directories +-------------------------------- + +In order to support rm and rmdir without changing the lower +filesystem, an overlay filesystem needs to record in the upper filesystem +that files have been removed. This is done using whiteouts and opaque +directories (non-directories are always opaque). + +A whiteout is created as a character device with 0/0 device number. +When a whiteout is found in the upper level of a merged directory, any +matching name in the lower level is ignored, and the whiteout itself +is also hidden. + +A directory is made opaque by setting the xattr "trusted.overlay.opaque" +to "y". Where the upper filesystem contains an opaque directory, any +directory in the lower filesystem with the same name is ignored. + +readdir +------- + +When a 'readdir' request is made on a merged directory, the upper and +lower directories are each read and the name lists merged in the +obvious way (upper is read first, then lower - entries that already +exist are not re-added). This merged name list is cached in the +'struct file' and so remains as long as the file is kept open. If the +directory is opened and read by two processes at the same time, they +will each have separate caches. A seekdir to the start of the +directory (offset 0) followed by a readdir will cause the cache to be +discarded and rebuilt. 
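As a concrete illustration of the whiteout and opaque-directory encoding described a few paragraphs above, the shell sketch below prepares an upper layer by hand before it is used in a mount. It is illustrative only: the paths /upper/foo and /upper/dir are hypothetical, both commands need root, and the upper filesystem must support trusted.* xattrs (the driver normally creates these objects itself when names are removed through the overlay).

    # Hide a lower file named "foo": a character device with
    # device number 0/0 in the upper layer acts as a whiteout.
    mknod /upper/foo c 0 0

    # Ignore everything under a lower directory named "dir":
    # mark the matching upper directory as opaque.
    setfattr -n trusted.overlay.opaque -v y /upper/dir

Once such an upper layer is part of a mounted overlay, readdir on the merged directory omits the whited-out name and the lower "dir" contents, matching the merging rules above.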
+ +This means that changes to the merged directory do not appear while a +directory is being read. This is unlikely to be noticed by many +programs. + +seek offsets are assigned sequentially when the directories are read. +Thus if + - read part of a directory + - remember an offset, and close the directory + - re-open the directory some time later + - seek to the remembered offset + +there may be little correlation between the old and new locations in +the list of filenames, particularly if anything has changed in the +directory. + +Readdir on directories that are not merged is simply handled by the +underlying directory (upper or lower). + + +Non-directories +--------------- + +Objects that are not directories (files, symlinks, device-special +files etc.) are presented either from the upper or lower filesystem as +appropriate. When a file in the lower filesystem is accessed in a way +the requires write-access, such as opening for write access, changing +some metadata etc., the file is first copied from the lower filesystem +to the upper filesystem (copy_up). Note that creating a hard-link +also requires copy_up, though of course creation of a symlink does +not. + +The copy_up may turn out to be unnecessary, for example if the file is +opened for read-write but the data is not modified. + +The copy_up process first makes sure that the containing directory +exists in the upper filesystem - creating it and any parents as +necessary. It then creates the object with the same metadata (owner, +mode, mtime, symlink-target etc.) and then if the object is a file, the +data is copied from the lower to the upper filesystem. Finally any +extended attributes are copied up. + +Once the copy_up is complete, the overlay filesystem simply +provides direct access to the newly created file in the upper +filesystem - future operations on the file are barely noticed by the +overlay filesystem (though an operation on the name of the file such as +rename or unlink will of course be noticed and handled). + + +Non-standard behavior +--------------------- + +The copy_up operation essentially creates a new, identical file and +moves it over to the old name. The new file may be on a different +filesystem, so both st_dev and st_ino of the file may change. + +Any open files referring to this inode will access the old data and +metadata. Similarly any file locks obtained before copy_up will not +apply to the copied up file. + +On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and +fsetxattr(2) will fail with EROFS. + +If a file with multiple hard links is copied up, then this will +"break" the link. Changes will not be propagated to other names +referring to the same inode. + +Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory +object in overlayfs will not contain valid absolute paths, only +relative paths leading up to the filesystem's root. This will be +fixed in the future. + +Some operations are not atomic, for example a crash during copy_up or +rename will leave the filesystem in an inconsistent state. This will +be addressed in the future. + +Changes to underlying filesystems +--------------------------------- + +Offline changes, when the overlay is not mounted, are allowed to either +the upper or the lower trees. + +Changes to the underlying filesystems while part of a mounted overlay +filesystem are not allowed. If the underlying filesystem is changed, +the behavior of the overlay is undefined, though it will not result in +a crash or deadlock. 
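Tying the sections of the document above together, here is a minimal end-to-end sketch. Directory names are illustrative, it assumes a kernel with this overlay filesystem support, and the filesystem type and option spelling simply follow the mount example given in the text:

    mkdir -p /lower /upper /work /merged
    echo hello > /lower/a.txt

    mount -t overlayfs overlayfs \
        -olowerdir=/lower,upperdir=/upper,workdir=/work /merged

    cat /merged/a.txt              # read: served directly from the lower layer
    echo world >> /merged/a.txt    # write access triggers copy_up
    ls /upper                      # a.txt now exists in the upper layer

After the append, the file's st_dev/st_ino seen through /merged may change, and descriptors opened before the write keep referring to the old lower data, exactly as noted under "Non-standard behavior".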
diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..aa974d445bfd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6832,6 +6832,13 @@ F: drivers/scsi/osd/ F: include/scsi/osd_* F: fs/exofs/ +OVERLAYFS FILESYSTEM +M: Miklos Szeredi +L: linux-fsdevel@vger.kernel.org +S: Supported +F: fs/overlayfs/* +F: Documentation/filesystems/overlayfs.txt + P54 WIRELESS DRIVER M: Christian Lamparter L: linux-wireless@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 580947d395c3827af805573c5fafa87dc01b13bb Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sat, 25 Oct 2014 09:41:05 +0200 Subject: MAINTAINERS: add missing headers in 802.15.4 This patch adds a lot of include headers which are missing by the current IEEE 802.15.4 subsystem. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 43898b1a8a2d..15867a680422 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4689,6 +4689,13 @@ S: Maintained F: net/ieee802154/ F: net/mac802154/ F: drivers/net/ieee802154/ +F: include/linux/nl802154.h +F: include/linux/ieee802154.h +F: include/net/nl802154.h +F: include/net/mac802154.h +F: include/net/af_ieee802154.h +F: include/net/cfg802154.h +F: include/net/ieee802154_netdev.h F: Documentation/networking/ieee802154.txt IGUANAWORKS USB IR TRANSCEIVER -- cgit v1.2.3-59-g8ed1b From 693373db508b13495f3507957241950c1668d1cf Mon Sep 17 00:00:00 2001 From: Don Brace Date: Wed, 15 Oct 2014 22:32:41 +0000 Subject: MAINTAINERS: change hpsa and cciss maintainer Change ownership of the hpsa driver from Stephen M. Cameron (Hewlett-Packard) to Don Brace (PMC-Sierra). Change ownership of the cciss driver from Mike Miller (Hewlett-Packard) to Don Brace (PMC-Sierra). Signed-off-by: Don Brace Reviewed-by: Robert Elliott Signed-off-by: Christoph Hellwig --- MAINTAINERS | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dab92a78d1d5..3d70e453af30 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4312,8 +4312,10 @@ F: Documentation/blockdev/cpqarray.txt F: drivers/block/cpqarray.* HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa) -M: "Stephen M. Cameron" +M: Don Brace L: iss_storagedev@hp.com +L: storagedev@pmcs.com +L: linux-scsi@vger.kernel.org S: Supported F: Documentation/scsi/hpsa.txt F: drivers/scsi/hpsa*.[ch] @@ -4321,8 +4323,10 @@ F: include/linux/cciss*.h F: include/uapi/linux/cciss*.h HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss) -M: Mike Miller +M: Don Brace L: iss_storagedev@hp.com +L: storagedev@pmcs.com +L: linux-scsi@vger.kernel.org S: Supported F: Documentation/blockdev/cciss.txt F: drivers/block/cciss* -- cgit v1.2.3-59-g8ed1b From ff150a76ae782a2d8730945c869869f77f2fcd39 Mon Sep 17 00:00:00 2001 From: Santosh Y Date: Sat, 25 Oct 2014 10:56:27 +0530 Subject: MAINTAINERS: ufs - remove self I have moved, I do not have the hardware access anymore. 
Signed-off-by: Santosh Y Signed-off-by: Christoph Hellwig --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3d70e453af30..82d4ad20197c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9610,7 +9610,6 @@ F: drivers/staging/unisys/ UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER M: Vinayak Holikatti -M: Santosh Y L: linux-scsi@vger.kernel.org S: Supported F: Documentation/scsi/ufs.txt -- cgit v1.2.3-59-g8ed1b From f0fd9ad87e698d634ab5894f751b7af908e96a41 Mon Sep 17 00:00:00 2001 From: Soren Brinkmann Date: Thu, 23 Oct 2014 09:29:22 -0700 Subject: MAINTAINERS: Add Soren as reviewer for Zynq Signed-off-by: Soren Brinkmann Acked-by: Michal Simek Signed-off-by: Arnd Bergmann --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 71fdbd4f4ac6..7b765a42e2df 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1543,6 +1543,7 @@ F: arch/arm/mach-pxa/include/mach/z2.h ARM/ZYNQ ARCHITECTURE M: Michal Simek +R: Sören Brinkmann L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://wiki.xilinx.com T: git git://git.xilinx.com/linux-xlnx.git -- cgit v1.2.3-59-g8ed1b From c654b6bf2cda7e051d1d5ba7e696855511f1105a Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Fri, 17 Oct 2014 11:49:40 +0200 Subject: pinctrl: at91: use own header Copy the mach/at91_pio.h header locally and use it for pinctrl-at91.c. This allows to remove the dependency on mach/at91_pio.h to be able to move at91 to multiplatform. Signed-off-by: Alexandre Belloni Acked-by: Nicolas Ferre Signed-off-by: Linus Walleij --- MAINTAINERS | 2 +- drivers/pinctrl/pinctrl-at91.c | 4 +-- drivers/pinctrl/pinctrl-at91.h | 72 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 4 deletions(-) create mode 100644 drivers/pinctrl/pinctrl-at91.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..c017d90bc739 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7161,7 +7161,7 @@ PIN CONTROLLER - ATMEL AT91 M: Jean-Christophe Plagniol-Villard L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: drivers/pinctrl/pinctrl-at91.c +F: drivers/pinctrl/pinctrl-at91.* PIN CONTROLLER - RENESAS M: Laurent Pinchart diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 354a81d40925..94643bbaee37 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -25,9 +25,7 @@ /* Since we request GPIOs from ourself */ #include -#include -#include - +#include "pinctrl-at91.h" #include "core.h" #define MAX_GPIO_BANKS 5 diff --git a/drivers/pinctrl/pinctrl-at91.h b/drivers/pinctrl/pinctrl-at91.h new file mode 100644 index 000000000000..79b957f1dfa2 --- /dev/null +++ b/drivers/pinctrl/pinctrl-at91.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * Parallel I/O Controller (PIO) - System peripherals registers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __PINCTRL_AT91_H +#define __PINCTRL_AT91_H + +#define PIO_PER 0x00 /* Enable Register */ +#define PIO_PDR 0x04 /* Disable Register */ +#define PIO_PSR 0x08 /* Status Register */ +#define PIO_OER 0x10 /* Output Enable Register */ +#define PIO_ODR 0x14 /* Output Disable Register */ +#define PIO_OSR 0x18 /* Output Status Register */ +#define PIO_IFER 0x20 /* Glitch Input Filter Enable */ +#define PIO_IFDR 0x24 /* Glitch Input Filter Disable */ +#define PIO_IFSR 0x28 /* Glitch Input Filter Status */ +#define PIO_SODR 0x30 /* Set Output Data Register */ +#define PIO_CODR 0x34 /* Clear Output Data Register */ +#define PIO_ODSR 0x38 /* Output Data Status Register */ +#define PIO_PDSR 0x3c /* Pin Data Status Register */ +#define PIO_IER 0x40 /* Interrupt Enable Register */ +#define PIO_IDR 0x44 /* Interrupt Disable Register */ +#define PIO_IMR 0x48 /* Interrupt Mask Register */ +#define PIO_ISR 0x4c /* Interrupt Status Register */ +#define PIO_MDER 0x50 /* Multi-driver Enable Register */ +#define PIO_MDDR 0x54 /* Multi-driver Disable Register */ +#define PIO_MDSR 0x58 /* Multi-driver Status Register */ +#define PIO_PUDR 0x60 /* Pull-up Disable Register */ +#define PIO_PUER 0x64 /* Pull-up Enable Register */ +#define PIO_PUSR 0x68 /* Pull-up Status Register */ +#define PIO_ASR 0x70 /* Peripheral A Select Register */ +#define PIO_ABCDSR1 0x70 /* Peripheral ABCD Select Register 1 [some sam9 only] */ +#define PIO_BSR 0x74 /* Peripheral B Select Register */ +#define PIO_ABCDSR2 0x74 /* Peripheral ABCD Select Register 2 [some sam9 only] */ +#define PIO_ABSR 0x78 /* AB Status Register */ +#define PIO_IFSCDR 0x80 /* Input Filter Slow Clock Disable Register */ +#define PIO_IFSCER 0x84 /* Input Filter Slow Clock Enable Register */ +#define PIO_IFSCSR 0x88 /* Input Filter Slow Clock Status Register */ +#define PIO_SCDR 0x8c /* Slow Clock Divider Debouncing Register */ +#define PIO_SCDR_DIV (0x3fff << 0) /* Slow Clock Divider Mask */ +#define PIO_PPDDR 0x90 /* Pad Pull-down Disable Register */ +#define PIO_PPDER 0x94 /* Pad Pull-down Enable Register */ +#define PIO_PPDSR 0x98 /* Pad Pull-down Status Register */ +#define PIO_OWER 0xa0 /* Output Write Enable Register */ +#define PIO_OWDR 0xa4 /* Output Write Disable Register */ +#define PIO_OWSR 0xa8 /* Output Write Status Register */ +#define PIO_AIMER 0xb0 /* Additional Interrupt Modes Enable Register */ +#define PIO_AIMDR 0xb4 /* Additional Interrupt Modes Disable Register */ +#define PIO_AIMMR 0xb8 /* Additional Interrupt Modes Mask Register */ +#define PIO_ESR 0xc0 /* Edge Select Register */ +#define PIO_LSR 0xc4 /* Level Select Register */ +#define PIO_ELSR 0xc8 /* Edge/Level Status Register */ +#define PIO_FELLSR 0xd0 /* Falling Edge/Low Level Select Register */ +#define PIO_REHLSR 0xd4 /* Rising Edge/ High Level Select Register */ +#define PIO_FRLHSR 0xd8 /* Fall/Rise - Low/High Status Register */ +#define PIO_SCHMITT 0x100 /* Schmitt Trigger Register */ + +#define SAMA5D3_PIO_DRIVER1 0x118 /*PIO Driver 1 register offset*/ +#define SAMA5D3_PIO_DRIVER2 0x11C /*PIO Driver 2 register offset*/ + +#define AT91SAM9X5_PIO_DRIVER1 0x114 /*PIO Driver 1 register offset*/ +#define AT91SAM9X5_PIO_DRIVER2 0x118 /*PIO Driver 2 register offset*/ + +#endif -- cgit v1.2.3-59-g8ed1b From cbd1b6529f07308bc4921a088b0f83895e403109 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Fri, 24 Oct 2014 15:16:53 +0300 Subject: MAINTAINERS: Add entry for Intel pin controller drivers Add MAINTAINERS entry for Intel pin controller drivers. 
I will be maintaining them with Heikki, who kindly promised to help me with this. Signed-off-by: Heikki Krogerus Signed-off-by: Mika Westerberg Signed-off-by: Linus Walleij --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c017d90bc739..f8042cfa4f6a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7163,6 +7163,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/pinctrl/pinctrl-at91.* +PIN CONTROLLER - INTEL +M: Mika Westerberg +M: Heikki Krogerus +S: Maintained +F: drivers/pinctrl/intel/ + PIN CONTROLLER - RENESAS M: Laurent Pinchart L: linux-sh@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From ef0bbac33dc051057237d1dac82e1d218b0cf306 Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Wed, 29 Oct 2014 16:15:22 +0530 Subject: MAINTAINERS: add cc2520 driver maintainer This patch adds maintainer for cc2520 radio driver Signed-off-by: Varka Bhadram Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 15867a680422..7ec37a396ffe 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2302,6 +2302,14 @@ F: security/capability.c F: security/commoncap.c F: kernel/capability.c +CC2520 IEEE-802.15.4 RADIO DRIVER +M: Varka Bhadram +L: linux-wpan@vger.kernel.org +S: Maintained +F: drivers/net/ieee802154/cc2520.c +F: include/linux/spi/cc2520.h +F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt + CELL BROADBAND ENGINE ARCHITECTURE M: Arnd Bergmann L: linuxppc-dev@lists.ozlabs.org -- cgit v1.2.3-59-g8ed1b From b33a8e137ef62ad8280a58c4aeadd1ffcae89598 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 29 Oct 2014 15:28:36 +0100 Subject: MAINTAINERS: Remove reference to shmobile / koelsch_defconfig koelsch_defconfig has been removed. Signed-off-by: Geert Uytterhoeven Signed-off-by: Simon Horman --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..3f73fb3c879e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1381,7 +1381,6 @@ F: arch/arm/boot/dts/sh* F: arch/arm/configs/ape6evm_defconfig F: arch/arm/configs/armadillo800eva_defconfig F: arch/arm/configs/bockw_defconfig -F: arch/arm/configs/koelsch_defconfig F: arch/arm/configs/kzm9g_defconfig F: arch/arm/configs/lager_defconfig F: arch/arm/configs/mackerel_defconfig -- cgit v1.2.3-59-g8ed1b From cc040ba269ae6972face1dc7376ab3eaab9f64c8 Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Wed, 29 Oct 2014 09:44:21 +0000 Subject: MAINTAINERS: drop list entry for davinci As davinci-linux-open-source@linux.davincidsp.com is now shut and no more maintained by TI, drop this entry from DAVINCI MACHINE SUPPORT and DAVINCI SERIES MEDIA DRIVER. 
Signed-off-by: Lad, Prabhakar Cc: Kevin Hilman Cc: arm@kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Acked-by: Sekhar Nori Signed-off-by: Kevin Hilman --- MAINTAINERS | 2 -- 1 file changed, 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 7b765a42e2df..f54ff97d621b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8465,7 +8465,6 @@ F: arch/arm/mach-s3c24xx/bast-irq.c TI DAVINCI MACHINE SUPPORT M: Sekhar Nori M: Kevin Hilman -L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers) T: git git://gitorious.org/linux-davinci/linux-davinci.git Q: http://patchwork.kernel.org/project/linux-davinci/list/ S: Supported @@ -8475,7 +8474,6 @@ F: drivers/i2c/busses/i2c-davinci.c TI DAVINCI SERIES MEDIA DRIVER M: Lad, Prabhakar L: linux-media@vger.kernel.org -L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers) W: http://linuxtv.org/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git -- cgit v1.2.3-59-g8ed1b
From 3ad50cca3919c08472232a633806f591216732b9 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 29 Oct 2014 10:44:56 -0700 Subject: net: dsa: Add support for Marvell 88E6352 Marvell 88E6352 is mostly compatible with MV88E6123/61/65, but requires indirect phy access. Also, its configuration registers are a bit different. Signed-off-by: Guenter Roeck Signed-off-by: David S. Miller --- MAINTAINERS | 5 + drivers/net/dsa/Kconfig | 8 + drivers/net/dsa/Makefile | 3 + drivers/net/dsa/mv88e6352.c | 464 ++++++++++++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx.c | 3 + drivers/net/dsa/mv88e6xxx.h | 7 + 6 files changed, 490 insertions(+) create mode 100644 drivers/net/dsa/mv88e6352.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 43898b1a8a2d..e4082ec8daeb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5848,6 +5848,11 @@ M: Russell King S: Maintained F: drivers/gpu/drm/armada/ +MARVELL 88E6352 DSA support +M: Guenter Roeck +S: Maintained +F: drivers/net/dsa/mv88e6352.c + MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) M: Mirko Lindner M: Stephen Hemminger diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 9234d808cbb3..0987c336e517 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -45,6 +45,14 @@ config NET_DSA_MV88E6171 This enables support for the Marvell 88E6171 ethernet switch chip. +config NET_DSA_MV88E6352 + tristate "Marvell 88E6352 ethernet switch chip support" + select NET_DSA + select NET_DSA_MV88E6XXX + select NET_DSA_TAG_EDSA + ---help--- + This enables support for the Marvell 88E6352 ethernet switch chip.
+ config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" depends on HAS_IOMEM diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 23a90de9830e..e2d51c4b9382 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -7,6 +7,9 @@ endif ifdef CONFIG_NET_DSA_MV88E6131 mv88e6xxx_drv-y += mv88e6131.o endif +ifdef CONFIG_NET_DSA_MV88E6352 +mv88e6xxx_drv-y += mv88e6352.o +endif ifdef CONFIG_NET_DSA_MV88E6171 mv88e6xxx_drv-y += mv88e6171.o endif diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c new file mode 100644 index 000000000000..43a58262a03d --- /dev/null +++ b/drivers/net/dsa/mv88e6352.c @@ -0,0 +1,464 @@ +/* + * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support + * + * Copyright (c) 2014 Guenter Roeck + * + * Derived from mv88e6123_61_65.c + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "mv88e6xxx.h" + +static int mv88e6352_phy_wait(struct dsa_switch *ds) +{ + unsigned long timeout = jiffies + HZ / 10; + + while (time_before(jiffies, timeout)) { + int ret; + + ret = REG_READ(REG_GLOBAL2, 0x18); + if (ret < 0) + return ret; + + if (!(ret & 0x8000)) + return 0; + + usleep_range(1000, 2000); + } + return -ETIMEDOUT; +} + +static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum) +{ + int ret; + + REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); + + ret = mv88e6352_phy_wait(ds); + if (ret < 0) + return ret; + + return REG_READ(REG_GLOBAL2, 0x19); +} + +static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum, + u16 val) +{ + REG_WRITE(REG_GLOBAL2, 0x19, val); + REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); + + return mv88e6352_phy_wait(ds); +} + +static char *mv88e6352_probe(struct device *host_dev, int sw_addr) +{ + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); + int ret; + + if (bus == NULL) + return NULL; + + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); + if (ret >= 0) { + if (ret == 0x3521) + return "Marvell 88E6352 (A0)"; + if (ret == 0x3522) + return "Marvell 88E6352 (A1)"; + if ((ret & 0xfff0) == 0x3520) + return "Marvell 88E6352"; + } + + return NULL; +} + +static int mv88e6352_switch_reset(struct dsa_switch *ds) +{ + unsigned long timeout; + int ret; + int i; + + /* Set all ports to the disabled state. */ + for (i = 0; i < 7; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); + + /* Reset the switch. Keep PPU active (bit 14, undocumented). + * The PPU needs to be active to support indirect phy register + * accesses through global registers 0x18 and 0x19. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc000); + + /* Wait up to one second for reset to complete. 
*/ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0x8800) == 0x8800) + break; + usleep_range(1000, 2000); + } + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + return 0; +} + +static int mv88e6352_setup_global(struct dsa_switch *ds) +{ + int ret; + int i; + + /* Discard packets with excessive collisions, + * mask all interrupt sources, enable PPU (bit 14, undocumented). + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x6000); + + /* Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); + + /* Configure the priority mapping registers. */ + ret = mv88e6xxx_config_prio(ds); + if (ret < 0) + return ret; + + /* Configure the upstream port, and configure the upstream + * port as the port to which ingress and egress monitor frames + * are to be sent. + */ + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); + + /* Disable remote management for now, and set the switch's + * DSA device number. + */ + REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:2x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:0x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); + + /* Disable the loopback filter, disable flow control + * messages, disable flood broadcast override, disable + * removing of provider tags, disable ATU age violation + * interrupts, disable tag flow control, force flow + * control priority to the highest, and send all special + * multicast frames to the CPU at the highest priority. + */ + REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); + + /* Program the DSA routing table. */ + for (i = 0; i < 32; i++) { + int nexthop = 0x1f; + + if (i != ds->index && i < ds->dst->pd->nr_chips) + nexthop = ds->pd->rtable[i] & 0x1f; + + REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); + } + + /* Clear all trunk masks. */ + for (i = 0; i < 8; i++) + REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f); + + /* Clear all trunk mappings. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); + + /* Disable ingress rate limiting by resetting all ingress + * rate limit registers to their initial state. + */ + for (i = 0; i < 7; i++) + REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); + + /* Initialise cross-chip port VLAN table to reset defaults. */ + REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); + + /* Clear the priority override table. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); + + /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ + + return 0; +} + +static int mv88e6352_setup_port(struct dsa_switch *ds, int p) +{ + int addr = REG_PORT(p); + u16 val; + + /* MAC Forcing register: don't force link, speed, duplex + * or flow control state to any particular values on physical + * ports, but force the CPU port and all DSA ports to 1000 Mb/s + * full duplex. + */ + if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) + REG_WRITE(addr, 0x01, 0x003e); + else + REG_WRITE(addr, 0x01, 0x0003); + + /* Do not limit the period of time that this port can be + * paused for by the remote end or the period of time that + * this port can pause the remote end. 
+ */ + REG_WRITE(addr, 0x02, 0x0000); + + /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + * disable Header mode, enable IGMP/MLD snooping, disable VLAN + * tunneling, determine priority by looking at 802.1p and IP + * priority fields (IP prio has precedence), and set STP state + * to Forwarding. + * + * If this is the CPU link, use DSA or EDSA tagging depending + * on which tagging mode was configured. + * + * If this is a link to another switch, use DSA tagging mode. + * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts and multicasts. + */ + val = 0x0433; + if (dsa_is_cpu_port(ds, p)) { + if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) + val |= 0x3300; + else + val |= 0x0100; + } + if (ds->dsa_port_mask & (1 << p)) + val |= 0x0100; + if (p == dsa_upstream_port(ds)) + val |= 0x000c; + REG_WRITE(addr, 0x04, val); + + /* Port Control 1: disable trunking. Also, if this is the + * CPU port, enable learn messages to be sent to this port. + */ + REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); + + /* Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the upstream port. + */ + val = (p & 0xf) << 12; + if (dsa_is_cpu_port(ds, p)) + val |= ds->phys_port_mask; + else + val |= 1 << dsa_upstream_port(ds); + REG_WRITE(addr, 0x06, val); + + /* Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. + */ + REG_WRITE(addr, 0x07, 0x0000); + + /* Port Control 2: don't force a good FCS, set the maximum + * frame size to 10240 bytes, don't let the switch add or + * strip 802.1q tags, don't discard tagged or untagged frames + * on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't + * send a copy of all transmitted/received frames on this port + * to the CPU. + */ + REG_WRITE(addr, 0x08, 0x2080); + + /* Egress rate control: disable egress rate control. */ + REG_WRITE(addr, 0x09, 0x0001); + + /* Egress rate control 2: disable egress rate control. */ + REG_WRITE(addr, 0x0a, 0x0000); + + /* Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + /* Port ATU control: disable limiting the number of address + * database entries that this port is allowed to use. + */ + REG_WRITE(addr, 0x0c, 0x0000); + + /* Priority Override: disable DA, SA and VTU priority override. */ + REG_WRITE(addr, 0x0d, 0x0000); + + /* Port Ethertype: use the Ethertype DSA Ethertype value. */ + REG_WRITE(addr, 0x0f, ETH_P_EDSA); + + /* Tag Remap: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x18, 0x3210); + + /* Tag Remap 2: use an identity 802.1p prio -> switch prio + * mapping. 
+ */ + REG_WRITE(addr, 0x19, 0x7654); + + return 0; +} + +static int mv88e6352_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + int i; + + mutex_init(&ps->smi_mutex); + mutex_init(&ps->stats_mutex); + mutex_init(&ps->phy_mutex); + + ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0; + + ret = mv88e6352_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise vtu and atu */ + + ret = mv88e6352_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < 7; i++) { + ret = mv88e6352_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6352_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= 4) + return port; + return -EINVAL; +} + +static int +mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int addr = mv88e6352_port_to_phy_addr(port); + int ret; + + if (addr < 0) + return addr; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_read(ds, addr, regnum); + mutex_unlock(&ps->phy_mutex); + + return ret; +} + +static int +mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int addr = mv88e6352_port_to_phy_addr(port); + int ret; + + if (addr < 0) + return addr; + + mutex_lock(&ps->phy_mutex); + ret = __mv88e6352_phy_write(ds, addr, regnum, val); + mutex_unlock(&ps->phy_mutex); + + return ret; +} + +static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = { + { "in_good_octets", 8, 0x00, }, + { "in_bad_octets", 4, 0x02, }, + { "in_unicast", 4, 0x04, }, + { "in_broadcasts", 4, 0x06, }, + { "in_multicasts", 4, 0x07, }, + { "in_pause", 4, 0x16, }, + { "in_undersize", 4, 0x18, }, + { "in_fragments", 4, 0x19, }, + { "in_oversize", 4, 0x1a, }, + { "in_jabber", 4, 0x1b, }, + { "in_rx_error", 4, 0x1c, }, + { "in_fcs_error", 4, 0x1d, }, + { "out_octets", 8, 0x0e, }, + { "out_unicast", 4, 0x10, }, + { "out_broadcasts", 4, 0x13, }, + { "out_multicasts", 4, 0x12, }, + { "out_pause", 4, 0x15, }, + { "excessive", 4, 0x11, }, + { "collisions", 4, 0x1e, }, + { "deferred", 4, 0x05, }, + { "single", 4, 0x14, }, + { "multiple", 4, 0x17, }, + { "out_fcs_error", 4, 0x03, }, + { "late", 4, 0x1f, }, + { "hist_64bytes", 4, 0x08, }, + { "hist_65_127bytes", 4, 0x09, }, + { "hist_128_255bytes", 4, 0x0a, }, + { "hist_256_511bytes", 4, 0x0b, }, + { "hist_512_1023bytes", 4, 0x0c, }, + { "hist_1024_max_bytes", 4, 0x0d, }, +}; + +static void +mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats), + mv88e6352_hw_stats, port, data); +} + +static void +mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats), + mv88e6352_hw_stats, port, data); +} + +static int mv88e6352_get_sset_count(struct dsa_switch *ds) +{ + return ARRAY_SIZE(mv88e6352_hw_stats); +} + +struct dsa_switch_driver mv88e6352_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_EDSA, + .priv_size = sizeof(struct mv88e6xxx_priv_state), + .probe = mv88e6352_probe, + .setup = mv88e6352_setup, + .set_addr = mv88e6xxx_set_addr_indirect, + .phy_read = mv88e6352_phy_read, + .phy_write = mv88e6352_phy_write, + .poll_link = mv88e6xxx_poll_link, + .get_strings = mv88e6352_get_strings, + .get_ethtool_stats = mv88e6352_get_ethtool_stats, + .get_sset_count = mv88e6352_get_sset_count, +}; + +MODULE_ALIAS("platform:mv88e6352"); diff --git a/drivers/net/dsa/mv88e6xxx.c 
b/drivers/net/dsa/mv88e6xxx.c index a6c90cf5634d..8e1090ba3df3 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -507,6 +507,9 @@ static int __init mv88e6xxx_init(void) #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) register_switch_driver(&mv88e6123_61_65_switch_driver); #endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) + register_switch_driver(&mv88e6352_switch_driver); +#endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) register_switch_driver(&mv88e6171_switch_driver); #endif diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 5e5145ad9525..c0ce133c756b 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -37,6 +37,12 @@ struct mv88e6xxx_priv_state { */ struct mutex stats_mutex; + /* This mutex serializes phy access for chips with + * indirect phy addressing. It is unused for chips + * with direct phy access. + */ + struct mutex phy_mutex; + int id; /* switch product id */ }; @@ -70,6 +76,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; +extern struct dsa_switch_driver mv88e6352_switch_driver; extern struct dsa_switch_driver mv88e6171_switch_driver; #define REG_READ(addr, reg) \ -- cgit v1.2.3-59-g8ed1b From b1c97193c6437a6083da67f8e97c8ee29b2f1989 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Thu, 23 Oct 2014 17:58:22 -0300 Subject: [media] rc: port IgorPlug-USB to rc-core This is a complete re-write inspired by the original lirc driver. Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 6 + drivers/media/rc/Kconfig | 15 +++ drivers/media/rc/Makefile | 1 + drivers/media/rc/igorplugusb.c | 261 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 283 insertions(+) create mode 100644 drivers/media/rc/igorplugusb.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dab92a78d1d5..1629341dcd9e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4695,6 +4695,12 @@ F: net/mac802154/ F: drivers/net/ieee802154/ F: Documentation/networking/ieee802154.txt +IGORPLUG-USB IR RECEIVER +M: Sean Young +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/rc/igorplugusb.c + IGUANAWORKS USB IR TRANSCEIVER M: Sean Young L: linux-media@vger.kernel.org diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 8ce08107a69d..1aea7320f52a 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -277,6 +277,21 @@ config IR_WINBOND_CIR To compile this driver as a module, choose M here: the module will be called winbond_cir. +config IR_IGORPLUGUSB + tristate "IgorPlug-USB IR Receiver" + depends on USB_ARCH_HAS_HCD + depends on RC_CORE + select USB + ---help--- + Say Y here if you want to use the IgorPlug-USB IR Receiver by + Igor Cesko. This device is included on the Fit-PC2. + + Note that this device can only record bursts of 36 IR pulses and + spaces, which is not enough for the NEC, Sanyo and RC-6 protocol. + + To compile this driver as a module, choose M here: the module will + be called igorplugusb. 
+ config IR_IGUANA tristate "IguanaWorks USB IR Transceiver" depends on USB_ARCH_HAS_HCD diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 0989f940e9cf..8f509e0d92cf 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_IR_STREAMZAP) += streamzap.o obj-$(CONFIG_IR_WINBOND_CIR) += winbond-cir.o obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o +obj-$(CONFIG_IR_IGORPLUGUSB) += igorplugusb.o obj-$(CONFIG_IR_IGUANA) += iguanair.o obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o obj-$(CONFIG_RC_ST) += st_rc.o diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c new file mode 100644 index 000000000000..b36e51576f8e --- /dev/null +++ b/drivers/media/rc/igorplugusb.c @@ -0,0 +1,261 @@ +/* + * IgorPlug-USB IR Receiver + * + * Copyright (C) 2014 Sean Young + * + * Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware. + * See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm + * + * Based on the lirc_igorplugusb.c driver: + * Copyright (C) 2004 Jan M. Hochstein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include + +#define DRIVER_DESC "IgorPlug-USB IR Receiver" +#define DRIVER_NAME "igorplugusb" + +#define HEADERLEN 3 +#define BUFLEN 36 +#define MAX_PACKET (HEADERLEN + BUFLEN) + +#define SET_INFRABUFFER_EMPTY 1 +#define GET_INFRACODE 2 + + +struct igorplugusb { + struct rc_dev *rc; + struct device *dev; + + struct urb *urb; + struct usb_ctrlrequest request; + + struct timer_list timer; + + uint8_t buf_in[MAX_PACKET]; + + char phys[64]; +}; + +static void igorplugusb_cmd(struct igorplugusb *ir, int cmd); + +static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len) +{ + DEFINE_IR_RAW_EVENT(rawir); + unsigned i, start, overflow; + + dev_dbg(ir->dev, "irdata: %*ph (len=%u)", len, ir->buf_in, len); + + /* + * If more than 36 pulses and spaces follow each other, the igorplugusb + * overwrites its buffer from the beginning. The overflow value is the + * last offset which was not overwritten. Everything from this offset + * onwards occurred before everything until this offset. 
+ */ + overflow = ir->buf_in[2]; + i = start = overflow + HEADERLEN; + + if (start >= len) { + dev_err(ir->dev, "receive overflow invalid: %u", overflow); + } else { + if (overflow > 0) + dev_warn(ir->dev, "receive overflow, at least %u lost", + overflow); + + do { + rawir.duration = ir->buf_in[i] * 85333; + rawir.pulse = i & 1; + + ir_raw_event_store_with_filter(ir->rc, &rawir); + + if (++i == len) + i = HEADERLEN; + } while (i != start); + + /* add a trailing space */ + rawir.duration = ir->rc->timeout; + rawir.pulse = false; + ir_raw_event_store_with_filter(ir->rc, &rawir); + + ir_raw_event_handle(ir->rc); + } + + igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY); +} + +static void igorplugusb_callback(struct urb *urb) +{ + struct usb_ctrlrequest *req; + struct igorplugusb *ir = urb->context; + + req = (struct usb_ctrlrequest *)urb->setup_packet; + + switch (urb->status) { + case 0: + if (req->bRequest == GET_INFRACODE && + urb->actual_length > HEADERLEN) + igorplugusb_irdata(ir, urb->actual_length); + else /* request IR */ + mod_timer(&ir->timer, jiffies + msecs_to_jiffies(50)); + break; + case -EPROTO: + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + usb_unlink_urb(urb); + return; + default: + dev_warn(ir->dev, "Error: urb status = %d\n", urb->status); + igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY); + break; + } +} + +static void igorplugusb_cmd(struct igorplugusb *ir, int cmd) +{ + int ret; + + ir->request.bRequest = cmd; + ir->urb->transfer_flags = 0; + ret = usb_submit_urb(ir->urb, GFP_ATOMIC); + if (ret) + dev_err(ir->dev, "submit urb failed: %d", ret); +} + +static void igorplugusb_timer(unsigned long data) +{ + struct igorplugusb *ir = (struct igorplugusb *)data; + + igorplugusb_cmd(ir, GET_INFRACODE); +} + +static int igorplugusb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev; + struct usb_host_interface *idesc; + struct usb_endpoint_descriptor *ep; + struct igorplugusb *ir; + struct rc_dev *rc; + int ret; + + udev = interface_to_usbdev(intf); + idesc = intf->cur_altsetting; + + if (idesc->desc.bNumEndpoints != 1) { + dev_err(&intf->dev, "incorrect number of endpoints"); + return -ENODEV; + } + + ep = &idesc->endpoint[0].desc; + if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_control(ep)) { + dev_err(&intf->dev, "endpoint incorrect"); + return -ENODEV; + } + + ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL); + if (!ir) + return -ENOMEM; + + ir->dev = &intf->dev; + + setup_timer(&ir->timer, igorplugusb_timer, (unsigned long)ir); + + ir->request.bRequest = GET_INFRACODE; + ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN; + ir->request.wLength = cpu_to_le16(sizeof(ir->buf_in)); + + ir->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!ir->urb) + return -ENOMEM; + + usb_fill_control_urb(ir->urb, udev, + usb_rcvctrlpipe(udev, 0), (uint8_t *)&ir->request, + ir->buf_in, sizeof(ir->buf_in), igorplugusb_callback, ir); + + usb_make_path(udev, ir->phys, sizeof(ir->phys)); + + rc = rc_allocate_device(); + rc->input_name = DRIVER_DESC; + rc->input_phys = ir->phys; + usb_to_input_id(udev, &rc->input_id); + rc->dev.parent = &intf->dev; + rc->driver_type = RC_DRIVER_IR_RAW; + /* + * This device can only store 36 pulses + spaces, which is not enough + * for the NEC protocol and many others. 
+ */ + rc->allowed_protocols = RC_BIT_ALL & ~(RC_BIT_NEC | RC_BIT_RC6_6A_20 | + RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | + RC_BIT_SONY20 | RC_BIT_MCE_KBD | RC_BIT_SANYO); + + rc->priv = ir; + rc->driver_name = DRIVER_NAME; + rc->map_name = RC_MAP_HAUPPAUGE; + rc->timeout = MS_TO_NS(100); + rc->rx_resolution = 85333; + + ir->rc = rc; + ret = rc_register_device(rc); + if (ret) { + dev_err(&intf->dev, "failed to register rc device: %d", ret); + rc_free_device(rc); + usb_free_urb(ir->urb); + return ret; + } + + usb_set_intfdata(intf, ir); + + igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY); + + return 0; +} + +static void igorplugusb_disconnect(struct usb_interface *intf) +{ + struct igorplugusb *ir = usb_get_intfdata(intf); + + rc_unregister_device(ir->rc); + del_timer_sync(&ir->timer); + usb_set_intfdata(intf, NULL); + usb_kill_urb(ir->urb); + usb_free_urb(ir->urb); +} + +static struct usb_device_id igorplugusb_table[] = { + /* Igor Plug USB (Atmel's Manufact. ID) */ + { USB_DEVICE(0x03eb, 0x0002) }, + /* Fit PC2 Infrared Adapter */ + { USB_DEVICE(0x03eb, 0x21fe) }, + /* Terminating entry */ + { } +}; + +static struct usb_driver igorplugusb_driver = { + .name = DRIVER_NAME, + .probe = igorplugusb_probe, + .disconnect = igorplugusb_disconnect, + .id_table = igorplugusb_table +}; + +module_usb_driver(igorplugusb_driver); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Sean Young "); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(usb, igorplugusb_table); -- cgit v1.2.3-59-g8ed1b From 2e069232fd470175068cbd6eaf8879ef47d772f8 Mon Sep 17 00:00:00 2001 From: Mark Einon Date: Sun, 19 Oct 2014 20:25:26 +0100 Subject: MAINTAINERS: Remove duplicate entry for usbip driver The usbip driver was moved out of staging in 3.17-rc3 but the MAINTAINERS file still has the old staging entry as well as the new one. Remove the old entry. Signed-off-by: Mark Einon Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 5 ----- 1 file changed, 5 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3c6427190be2..22d0bb08127f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9703,11 +9703,6 @@ S: Maintained F: Documentation/hid/hiddev.txt F: drivers/hid/usbhid/ -USB/IP DRIVERS -L: linux-usb@vger.kernel.org -S: Orphan -F: drivers/staging/usbip/ - USB ISP116X DRIVER M: Olav Kongas L: linux-usb@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 0b14261edc8c7c7ef6d30133df9188f5a7f3d85f Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Tue, 4 Nov 2014 07:26:35 -0300 Subject: [media] MAINTAINERS: add maintainer for CODA video4linux mem2mem driver Add myself as maintainer for the CODA V4L2 mem2mem driver. Signed-off-by: Philipp Zabel Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1629341dcd9e..39e3f9155ebb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2487,6 +2487,13 @@ F: fs/coda/ F: include/linux/coda*.h F: include/uapi/linux/coda*.h +CODA V4L2 MEM2MEM DRIVER +M: Philipp Zabel +L: linux-media@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/media/coda.txt +F: drivers/media/platform/coda/ + COMMON CLK FRAMEWORK M: Mike Turquette L: linux-kernel@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 8bcdd9297d213c6f8c40b87423581cea0fbf37c2 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Tue, 4 Nov 2014 20:27:17 -0700 Subject: MAINTAINERS: update bcm2835 entry Add Lee Jones as a new co-maintainer. The kernel.org repo moved to allow us both to push to it. 
Update MAINTAINERS to match. Signed-off-by: Stephen Warren Signed-off-by: Olof Johansson --- MAINTAINERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f155c2a1255e..7b690fc1e449 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2072,8 +2072,9 @@ F: drivers/clocksource/bcm_kona_timer.c BROADCOM BCM2835 ARM ARCHITECTURE M: Stephen Warren +M: Lee Jones L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) -T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-rpi.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git S: Maintained N: bcm2835 -- cgit v1.2.3-59-g8ed1b From 9f3295b9ea8e54a6c65231d267f069edf420b64f Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Wed, 5 Nov 2014 20:51:13 +0100 Subject: ieee802154: remove nl802154 unused functions The include/net/nl802154.h file contains a lot of prototypes which are not used inside of ieee802154 subsystem. This patch removes this file and make the only one used prototype "ieee802154_nl_start_confirm" as static declaration in ieee802154/nl-mac.c Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- MAINTAINERS | 1 - include/net/nl802154.h | 122 ------------------------------- net/ieee802154/nl-mac.c | 187 ++---------------------------------------------- net/mac802154/mac_cmd.c | 6 -- 4 files changed, 6 insertions(+), 310 deletions(-) delete mode 100644 include/net/nl802154.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 7ec37a396ffe..b42eb50b7426 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4699,7 +4699,6 @@ F: net/mac802154/ F: drivers/net/ieee802154/ F: include/linux/nl802154.h F: include/linux/ieee802154.h -F: include/net/nl802154.h F: include/net/mac802154.h F: include/net/af_ieee802154.h F: include/net/cfg802154.h diff --git a/include/net/nl802154.h b/include/net/nl802154.h deleted file mode 100644 index b5cdea29d9d9..000000000000 --- a/include/net/nl802154.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - * nl802154.h - * - * Copyright (C) 2007, 2008, 2009 Siemens AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef IEEE802154_NL_H -#define IEEE802154_NL_H - -struct net_device; -struct ieee802154_addr; - -/** - * ieee802154_nl_assoc_indic - Notify userland of an association request. - * @dev: The network device on which this association request was - * received. - * @addr: The address of the device requesting association. - * @cap: The capability information field from the device. - * - * This informs a userland coordinator of a device requesting to - * associate with the PAN controlled by the coordinator. - * - * Note: This is in section 7.3.1 of the IEEE 802.15.4-2006 document. - */ -int ieee802154_nl_assoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, u8 cap); - -/** - * ieee802154_nl_assoc_confirm - Notify userland of association. - * @dev: The device which has completed association. - * @short_addr: The short address assigned to the device. - * @status: The status of the association. - * - * Inform userland of the result of an association request. 
If the - * association request included asking the coordinator to allocate - * a short address then it is returned in @short_addr. - * - * Note: This is in section 7.3.2 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_assoc_confirm(struct net_device *dev, - __le16 short_addr, u8 status); - -/** - * ieee802154_nl_disassoc_indic - Notify userland of disassociation. - * @dev: The device on which disassociation was indicated. - * @addr: The device which is disassociating. - * @reason: The reason for the disassociation. - * - * Inform userland that a device has disassociated from the network. - * - * Note: This is in section 7.3.3 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_disassoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, u8 reason); - -/** - * ieee802154_nl_disassoc_confirm - Notify userland of disassociation - * completion. - * @dev: The device on which disassociation was ordered. - * @status: The result of the disassociation. - * - * Inform userland of the result of requesting that a device - * disassociate, or the result of requesting that we disassociate from - * a PAN managed by another coordinator. - * - * Note: This is in section 7.1.4.3 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_disassoc_confirm(struct net_device *dev, - u8 status); - -/** - * ieee802154_nl_scan_confirm - Notify userland of completion of scan. - * @dev: The device which was instructed to scan. - * @status: The status of the scan operation. - * @scan_type: What type of scan was performed. - * @unscanned: Any channels that the device was unable to scan. - * @edl: The energy levels (if a passive scan). - * - * - * Note: This is in section 7.1.11 of the IEEE 802.15.4 document. - * Note: This API does not permit the return of an active scan result. - */ -int ieee802154_nl_scan_confirm(struct net_device *dev, - u8 status, u8 scan_type, u32 unscanned, u8 page, - u8 *edl/*, struct list_head *pan_desc_list */); - -/** - * ieee802154_nl_beacon_indic - Notify userland of a received beacon. - * @dev: The device on which a beacon was received. - * @panid: The PAN of the coordinator. - * @coord_addr: The short address of the coordinator on that PAN. - * - * Note: This is in section 7.1.5 of the IEEE 802.15.4 document. - * Note: This API does not provide extended information such as what - * channel the PAN is on or what the LQI of the beacon frame was on - * receipt. - * Note: This API cannot indicate a beacon frame for a coordinator - * operating in long addressing mode. - */ -int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid, - __le16 coord_addr); - -/** - * ieee802154_nl_start_confirm - Notify userland of completion of start. - * @dev: The device which was instructed to scan. - * @status: The status of the scan operation. - * - * Note: This is in section 7.1.14 of the IEEE 802.15.4 document. 
- */ -int ieee802154_nl_start_confirm(struct net_device *dev, u8 status); - -#endif diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index cc2919dbe5e0..91a1855e521c 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include @@ -55,186 +54,7 @@ static __le16 nla_get_shortaddr(const struct nlattr *nla) return cpu_to_le16(nla_get_u16(nla)); } -int ieee802154_nl_assoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, - u8 cap) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - if (addr->mode != IEEE802154_ADDR_LONG) { - pr_err("%s: received non-long source address!\n", __func__); - return -EINVAL; - } - - msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_hwaddr(msg, IEEE802154_ATTR_SRC_HW_ADDR, - addr->extended_addr) || - nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap)) - goto nla_put_failure; - - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_assoc_indic); - -int ieee802154_nl_assoc_confirm(struct net_device *dev, __le16 short_addr, - u8 status) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) || - nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_assoc_confirm); - -int ieee802154_nl_disassoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, - u8 reason) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr)) - goto nla_put_failure; - if (addr->mode == IEEE802154_ADDR_LONG) { - if (nla_put_hwaddr(msg, IEEE802154_ATTR_SRC_HW_ADDR, - addr->extended_addr)) - goto nla_put_failure; - } else { - if (nla_put_shortaddr(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, - addr->short_addr)) - goto nla_put_failure; - } - if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason)) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_disassoc_indic); - -int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) 
|| - nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm); - -int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid, - __le16 coord_addr) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_shortaddr(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, - coord_addr) || - nla_put_shortaddr(msg, IEEE802154_ATTR_COORD_PAN_ID, panid)) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_beacon_indic); - -int ieee802154_nl_scan_confirm(struct net_device *dev, - u8 status, u8 scan_type, - u32 unscanned, u8 page, - u8 *edl/* , struct list_head *pan_desc_list */) -{ - struct sk_buff *msg; - - pr_debug("%s\n", __func__); - - msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF); - if (!msg) - return -ENOBUFS; - - if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || - nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || - nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr) || - nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) || - nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) || - nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) || - nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) || - (edl && - nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl))) - goto nla_put_failure; - return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP); - -nla_put_failure: - nlmsg_free(msg); - return -ENOBUFS; -} -EXPORT_SYMBOL(ieee802154_nl_scan_confirm); - -int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) +static int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) { struct sk_buff *msg; @@ -530,6 +350,11 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page, bcn_ord, sf_ord, pan_coord, blx, coord_realign); + /* FIXME: add validation for unused parameters to be sane + * for SoftMAC + */ + ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS); + out: dev_put(dev); return ret; diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c index 9c2d6f61f194..e1ad83e35899 100644 --- a/net/mac802154/mac_cmd.c +++ b/net/mac802154/mac_cmd.c @@ -25,7 +25,6 @@ #include #include #include -#include #include "ieee802154_i.h" #include "driver-ops.h" @@ -65,11 +64,6 @@ static int mac802154_mlme_start_req(struct net_device *dev, rc = ops->llsec->set_params(dev, ¶ms, changed); } - /* FIXME: add validation for unused parameters to be sane - * for SoftMAC - */ - ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS); - return rc; } -- cgit v1.2.3-59-g8ed1b From 6bd0f43699736f30e46b21756d891af832e199eb Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Wed, 22 Oct 2014 17:22:20 +0200 Subject: MAINTAINERS: add entry for Atmel XDMA driver Signed-off-by: Ludovic Desroches Acked-by: Nicolas Ferre Signed-off-by: Vinod Koul --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 
3c6427190be2..fd3771cba0c9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1713,6 +1713,13 @@ F: drivers/dma/at_hdmac.c F: drivers/dma/at_hdmac_regs.h F: include/linux/platform_data/dma-atmel.h +ATMEL XDMA DRIVER +M: Ludovic Desroches +L: linux-arm-kernel@lists.infradead.org +L: dmaengine@vger.kernel.org +S: Supported +F: drivers/dma/at_xdmac.c + ATMEL I2C DRIVER M: Ludovic Desroches L: linux-i2c@vger.kernel.org -- cgit v1.2.3-59-g8ed1b
From 979a281efe1107fff9142b18c1605fb6b7413003 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 6 Nov 2014 11:21:17 +0530 Subject: MAINTAINERS: add Documentation files to dmaengine entry We were missing the Documentation files in the entry, so add them now. While at it, also remove the comment on slave-dma from the tree entry. Signed-off-by: Vinod Koul --- MAINTAINERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index fd3771cba0c9..b3f5493e9d38 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3080,7 +3080,8 @@ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ S: Maintained F: drivers/dma/ F: include/linux/dma* -T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma) +F: Documentation/dmaengine/ +T: git git://git.infradead.org/users/vkoul/slave-dma.git DME1737 HARDWARE MONITOR DRIVER M: Juerg Haefliger -- cgit v1.2.3-59-g8ed1b
From 7645c2f4677198df449228a4736d7f94e5c14156 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Tue, 21 Oct 2014 15:23:06 -0700 Subject: MAINTAINERS: Add entry for rp2 (Rocketport Express/Infinity) driver I wrote this driver and use it daily on several machines for work, so why not. Signed-off-by: Kevin Cernekee Acked-by: Florian Fainelli Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3c6427190be2..878a56066924 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7813,6 +7813,12 @@ S: Maintained F: Documentation/serial/rocket.txt F: drivers/tty/rocket* +ROCKETPORT EXPRESS/INFINITY DRIVER +M: Kevin Cernekee +L: linux-serial@vger.kernel.org +S: Odd Fixes +F: drivers/tty/serial/rp2.* + ROSE NETWORK LAYER M: Ralf Baechle L: linux-hams@vger.kernel.org -- cgit v1.2.3-59-g8ed1b
From a06ae8609b3dd06b957a6e4e965772a8a14d3af5 Mon Sep 17 00:00:00 2001 From: Pratik Patel Date: Mon, 3 Nov 2014 11:07:35 -0700 Subject: coresight: add CoreSight core layer framework CoreSight components are compliant with the ARM CoreSight architecture specification and can be connected in various topologies to suit a particular SoC's tracing needs. These trace components can generally be classified as sources, links and sinks. Trace data produced by one or more sources flows through the intermediate links connecting the source to the currently selected sink. The CoreSight framework provides an interface for the CoreSight trace drivers to register themselves with. It's intended to build up a topological view of the CoreSight components and configure the correct series of components on user input via sysfs. For example, when enabling a source, the framework builds up a path consisting of all the components connecting the source to the currently selected sink(s) and enables all of them. The framework also supports switching between available sinks and provides status information to user space applications through the debugfs interface.
Signed-off-by: Pratik Patel Signed-off-by: Mathieu Poirier Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 8 + arch/arm/Kconfig.debug | 9 + drivers/Makefile | 1 + drivers/amba/bus.c | 2 +- drivers/coresight/Makefile | 5 + drivers/coresight/coresight-priv.h | 63 ++++ drivers/coresight/coresight.c | 717 +++++++++++++++++++++++++++++++++++++ drivers/coresight/of_coresight.c | 204 +++++++++++ include/linux/amba/bus.h | 1 + include/linux/coresight.h | 263 ++++++++++++++ 10 files changed, 1272 insertions(+), 1 deletion(-) create mode 100644 drivers/coresight/Makefile create mode 100644 drivers/coresight/coresight-priv.h create mode 100644 drivers/coresight/coresight.c create mode 100644 drivers/coresight/of_coresight.c create mode 100644 include/linux/coresight.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3c6427190be2..39952634be8a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -918,6 +918,14 @@ M: Hubert Feurstein S: Maintained F: arch/arm/mach-ep93xx/micro9.c +ARM/CORESIGHT FRAMEWORK AND DRIVERS +M: Mathieu Poirier +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: drivers/coresight/* +F: Documentation/trace/coresight.txt +F: Documentation/devicetree/bindings/arm/coresight.txt + ARM/CORGI MACHINE SUPPORT M: Richard Purdie S: Maintained diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 03dc4c1a8736..cd3890e3110e 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1331,4 +1331,13 @@ config DEBUG_SET_MODULE_RONX against certain classes of kernel exploits. If in doubt, say "N". +menuconfig CORESIGHT + bool "CoreSight Tracing Support" + select ARM_AMBA + help + This framework provides a kernel interface for the CoreSight debug + and trace drivers to register themselves with. It's intended to build + a topological view of the CoreSight components based on a DT + specification and configure the right serie of components when a + trace source gets enabled. endmenu diff --git a/drivers/Makefile b/drivers/Makefile index ebee55537a05..628b512b625b 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -161,3 +161,4 @@ obj-$(CONFIG_POWERCAP) += powercap/ obj-$(CONFIG_MCB) += mcb/ obj-$(CONFIG_RAS) += ras/ obj-$(CONFIG_THUNDERBOLT) += thunderbolt/ +obj-$(CONFIG_CORESIGHT) += coresight/ diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 47bbdc1b5be3..a4ac490dd784 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -336,7 +336,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent) amba_put_disable_pclk(dev); - if (cid == AMBA_CID) + if (cid == AMBA_CID || cid == CORESIGHT_CID) dev->periphid = pid; if (!dev->periphid) diff --git a/drivers/coresight/Makefile b/drivers/coresight/Makefile new file mode 100644 index 000000000000..218e3b589f93 --- /dev/null +++ b/drivers/coresight/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for CoreSight drivers. +# +obj-$(CONFIG_CORESIGHT) += coresight.o +obj-$(CONFIG_OF) += of_coresight.o diff --git a/drivers/coresight/coresight-priv.h b/drivers/coresight/coresight-priv.h new file mode 100644 index 000000000000..8d1180c47c0b --- /dev/null +++ b/drivers/coresight/coresight-priv.h @@ -0,0 +1,63 @@ +/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CORESIGHT_PRIV_H +#define _CORESIGHT_PRIV_H + +#include +#include +#include + +/* + * Coresight management registers (0xf00-0xfcc) + * 0xfa0 - 0xfa4: Management registers in PFTv1.0 + * Trace registers in PFTv1.1 + */ +#define CORESIGHT_ITCTRL 0xf00 +#define CORESIGHT_CLAIMSET 0xfa0 +#define CORESIGHT_CLAIMCLR 0xfa4 +#define CORESIGHT_LAR 0xfb0 +#define CORESIGHT_LSR 0xfb4 +#define CORESIGHT_AUTHSTATUS 0xfb8 +#define CORESIGHT_DEVID 0xfc8 +#define CORESIGHT_DEVTYPE 0xfcc + +#define TIMEOUT_US 100 +#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb) + +static inline void CS_LOCK(void __iomem *addr) +{ + do { + /* Wait for things to settle */ + mb(); + writel_relaxed(0x0, addr + CORESIGHT_LAR); + } while (0); +} + +static inline void CS_UNLOCK(void __iomem *addr) +{ + do { + writel_relaxed(CORESIGHT_UNLOCK, addr + CORESIGHT_LAR); + /* Make sure eveyone has seen this */ + mb(); + } while (0); +} + +#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X +extern int etm_readl_cp14(u32 off, unsigned int *val); +extern int etm_writel_cp14(u32 off, u32 val); +#else +static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; } +static inline int etm_writel_cp14(u32 val, u32 off) { return 0; } +#endif + +#endif diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c new file mode 100644 index 000000000000..6e0181f84425 --- /dev/null +++ b/drivers/coresight/coresight.c @@ -0,0 +1,717 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "coresight-priv.h" + +static DEFINE_MUTEX(coresight_mutex); + +static int coresight_id_match(struct device *dev, void *data) +{ + int trace_id, i_trace_id; + struct coresight_device *csdev, *i_csdev; + + csdev = data; + i_csdev = to_coresight_device(dev); + + /* + * No need to care about oneself and components that are not + * sources or not enabled + */ + if (i_csdev == csdev || !i_csdev->enable || + i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE) + return 0; + + /* Get the source ID for both compoment */ + trace_id = source_ops(csdev)->trace_id(csdev); + i_trace_id = source_ops(i_csdev)->trace_id(i_csdev); + + /* All you need is one */ + if (trace_id == i_trace_id) + return 1; + + return 0; +} + +static int coresight_source_is_unique(struct coresight_device *csdev) +{ + int trace_id = source_ops(csdev)->trace_id(csdev); + + /* this shouldn't happen */ + if (trace_id < 0) + return 0; + + return !bus_for_each_dev(&coresight_bustype, NULL, + csdev, coresight_id_match); +} + +static int coresight_find_link_inport(struct coresight_device *csdev) +{ + int i; + struct coresight_device *parent; + struct coresight_connection *conn; + + parent = container_of(csdev->path_link.next, + struct coresight_device, path_link); + + for (i = 0; i < parent->nr_outport; i++) { + conn = &parent->conns[i]; + if (conn->child_dev == csdev) + return conn->child_port; + } + + dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n", + dev_name(&parent->dev), dev_name(&csdev->dev)); + + return 0; +} + +static int coresight_find_link_outport(struct coresight_device *csdev) +{ + int i; + struct coresight_device *child; + struct coresight_connection *conn; + + child = container_of(csdev->path_link.prev, + struct coresight_device, path_link); + + for (i = 0; i < csdev->nr_outport; i++) { + conn = &csdev->conns[i]; + if (conn->child_dev == child) + return conn->outport; + } + + dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n", + dev_name(&csdev->dev), dev_name(&child->dev)); + + return 0; +} + +static int coresight_enable_sink(struct coresight_device *csdev) +{ + int ret; + + if (!csdev->enable) { + if (sink_ops(csdev)->enable) { + ret = sink_ops(csdev)->enable(csdev); + if (ret) + return ret; + } + csdev->enable = true; + } + + atomic_inc(csdev->refcnt); + + return 0; +} + +static void coresight_disable_sink(struct coresight_device *csdev) +{ + if (atomic_dec_return(csdev->refcnt) == 0) { + if (sink_ops(csdev)->disable) { + sink_ops(csdev)->disable(csdev); + csdev->enable = false; + } + } +} + +static int coresight_enable_link(struct coresight_device *csdev) +{ + int ret; + int link_subtype; + int refport, inport, outport; + + inport = coresight_find_link_inport(csdev); + outport = coresight_find_link_outport(csdev); + link_subtype = csdev->subtype.link_subtype; + + if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) + refport = inport; + else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) + refport = outport; + else + refport = 0; + + if (atomic_inc_return(&csdev->refcnt[refport]) == 1) { + if (link_ops(csdev)->enable) { + ret = link_ops(csdev)->enable(csdev, inport, outport); + if (ret) + return ret; + } + } + + csdev->enable = true; + + return 0; +} + +static void coresight_disable_link(struct coresight_device *csdev) +{ + int i, nr_conns; + int link_subtype; + int refport, inport, outport; + + inport = 
coresight_find_link_inport(csdev); + outport = coresight_find_link_outport(csdev); + link_subtype = csdev->subtype.link_subtype; + + if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) { + refport = inport; + nr_conns = csdev->nr_inport; + } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) { + refport = outport; + nr_conns = csdev->nr_outport; + } else { + refport = 0; + nr_conns = 1; + } + + if (atomic_dec_return(&csdev->refcnt[refport]) == 0) { + if (link_ops(csdev)->disable) + link_ops(csdev)->disable(csdev, inport, outport); + } + + for (i = 0; i < nr_conns; i++) + if (atomic_read(&csdev->refcnt[i]) != 0) + return; + + csdev->enable = false; +} + +static int coresight_enable_source(struct coresight_device *csdev) +{ + int ret; + + if (!coresight_source_is_unique(csdev)) { + dev_warn(&csdev->dev, "traceID %d not unique\n", + source_ops(csdev)->trace_id(csdev)); + return -EINVAL; + } + + if (!csdev->enable) { + if (source_ops(csdev)->enable) { + ret = source_ops(csdev)->enable(csdev); + if (ret) + return ret; + } + csdev->enable = true; + } + + atomic_inc(csdev->refcnt); + + return 0; +} + +static void coresight_disable_source(struct coresight_device *csdev) +{ + if (atomic_dec_return(csdev->refcnt) == 0) { + if (source_ops(csdev)->disable) { + source_ops(csdev)->disable(csdev); + csdev->enable = false; + } + } +} + +static int coresight_enable_path(struct list_head *path) +{ + int ret = 0; + struct coresight_device *cd; + + list_for_each_entry(cd, path, path_link) { + if (cd == list_first_entry(path, struct coresight_device, + path_link)) { + ret = coresight_enable_sink(cd); + } else if (list_is_last(&cd->path_link, path)) { + /* + * Don't enable the source just yet - this needs to + * happen at the very end when all links and sink + * along the path have been configured properly. + */ + ; + } else { + ret = coresight_enable_link(cd); + } + if (ret) + goto err; + } + + return 0; +err: + list_for_each_entry_continue_reverse(cd, path, path_link) { + if (cd == list_first_entry(path, struct coresight_device, + path_link)) { + coresight_disable_sink(cd); + } else if (list_is_last(&cd->path_link, path)) { + ; + } else { + coresight_disable_link(cd); + } + } + + return ret; +} + +static int coresight_disable_path(struct list_head *path) +{ + struct coresight_device *cd; + + list_for_each_entry_reverse(cd, path, path_link) { + if (cd == list_first_entry(path, struct coresight_device, + path_link)) { + coresight_disable_sink(cd); + } else if (list_is_last(&cd->path_link, path)) { + /* + * The source has already been stopped, no need + * to do it again here. 
+ */ + ; + } else { + coresight_disable_link(cd); + } + } + + return 0; +} + +static int coresight_build_paths(struct coresight_device *csdev, + struct list_head *path, + bool enable) +{ + int i, ret = -EINVAL; + struct coresight_connection *conn; + + list_add(&csdev->path_link, path); + + if (csdev->type == CORESIGHT_DEV_TYPE_SINK && csdev->activated) { + if (enable) + ret = coresight_enable_path(path); + else + ret = coresight_disable_path(path); + } else { + for (i = 0; i < csdev->nr_outport; i++) { + conn = &csdev->conns[i]; + if (coresight_build_paths(conn->child_dev, + path, enable) == 0) + ret = 0; + } + } + + if (list_first_entry(path, struct coresight_device, path_link) != csdev) + dev_err(&csdev->dev, "wrong device in %s\n", __func__); + + list_del(&csdev->path_link); + + return ret; +} + +int coresight_enable(struct coresight_device *csdev) +{ + int ret = 0; + LIST_HEAD(path); + + mutex_lock(&coresight_mutex); + if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { + ret = -EINVAL; + dev_err(&csdev->dev, "wrong device type in %s\n", __func__); + goto out; + } + if (csdev->enable) + goto out; + + if (coresight_build_paths(csdev, &path, true)) { + dev_err(&csdev->dev, "building path(s) failed\n"); + goto out; + } + + if (coresight_enable_source(csdev)) + dev_err(&csdev->dev, "source enable failed\n"); +out: + mutex_unlock(&coresight_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(coresight_enable); + +void coresight_disable(struct coresight_device *csdev) +{ + LIST_HEAD(path); + + mutex_lock(&coresight_mutex); + if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { + dev_err(&csdev->dev, "wrong device type in %s\n", __func__); + goto out; + } + if (!csdev->enable) + goto out; + + coresight_disable_source(csdev); + if (coresight_build_paths(csdev, &path, false)) + dev_err(&csdev->dev, "releasing path(s) failed\n"); + +out: + mutex_unlock(&coresight_mutex); +} +EXPORT_SYMBOL_GPL(coresight_disable); + +static ssize_t enable_sink_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct coresight_device *csdev = to_coresight_device(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated); +} + +static ssize_t enable_sink_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct coresight_device *csdev = to_coresight_device(dev); + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val) + csdev->activated = true; + else + csdev->activated = false; + + return size; + +} +static DEVICE_ATTR_RW(enable_sink); + +static ssize_t enable_source_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct coresight_device *csdev = to_coresight_device(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable); +} + +static ssize_t enable_source_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret = 0; + unsigned long val; + struct coresight_device *csdev = to_coresight_device(dev); + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val) { + ret = coresight_enable(csdev); + if (ret) + return ret; + } else { + coresight_disable(csdev); + } + + return size; +} +static DEVICE_ATTR_RW(enable_source); + +static struct attribute *coresight_sink_attrs[] = { + &dev_attr_enable_sink.attr, + NULL, +}; +ATTRIBUTE_GROUPS(coresight_sink); + +static struct attribute *coresight_source_attrs[] = { + &dev_attr_enable_source.attr, + NULL, +}; +ATTRIBUTE_GROUPS(coresight_source); + +static 
struct device_type coresight_dev_type[] = { + { + .name = "none", + }, + { + .name = "sink", + .groups = coresight_sink_groups, + }, + { + .name = "link", + }, + { + .name = "linksink", + .groups = coresight_sink_groups, + }, + { + .name = "source", + .groups = coresight_source_groups, + }, +}; + +static void coresight_device_release(struct device *dev) +{ + struct coresight_device *csdev = to_coresight_device(dev); + + kfree(csdev); +} + +static int coresight_orphan_match(struct device *dev, void *data) +{ + int i; + bool still_orphan = false; + struct coresight_device *csdev, *i_csdev; + struct coresight_connection *conn; + + csdev = data; + i_csdev = to_coresight_device(dev); + + /* No need to check oneself */ + if (csdev == i_csdev) + return 0; + + /* Move on to another component if no connection is orphan */ + if (!i_csdev->orphan) + return 0; + /* + * Circle throuch all the connection of that component. If we find + * an orphan connection whose name matches @csdev, link it. + */ + for (i = 0; i < i_csdev->nr_outport; i++) { + conn = &i_csdev->conns[i]; + + /* We have found at least one orphan connection */ + if (conn->child_dev == NULL) { + /* Does it match this newly added device? */ + if (!strcmp(dev_name(&csdev->dev), conn->child_name)) + conn->child_dev = csdev; + } else { + /* Too bad, this component still has an orphan */ + still_orphan = true; + } + } + + i_csdev->orphan = still_orphan; + + /* + * Returning '0' ensures that all known component on the + * bus will be checked. + */ + return 0; +} + +static void coresight_fixup_orphan_conns(struct coresight_device *csdev) +{ + /* + * No need to check for a return value as orphan connection(s) + * are hooked-up with each newly added component. + */ + bus_for_each_dev(&coresight_bustype, NULL, + csdev, coresight_orphan_match); +} + + +static int coresight_name_match(struct device *dev, void *data) +{ + char *to_match; + struct coresight_device *i_csdev; + + to_match = data; + i_csdev = to_coresight_device(dev); + + if (!strcmp(to_match, dev_name(&i_csdev->dev))) + return 1; + + return 0; +} + +static void coresight_fixup_device_conns(struct coresight_device *csdev) +{ + int i; + struct device *dev = NULL; + struct coresight_connection *conn; + + for (i = 0; i < csdev->nr_outport; i++) { + conn = &csdev->conns[i]; + dev = bus_find_device(&coresight_bustype, NULL, + (void *)conn->child_name, + coresight_name_match); + + if (dev) { + conn->child_dev = to_coresight_device(dev); + } else { + csdev->orphan = true; + conn->child_dev = NULL; + } + } +} + +/** + * coresight_timeout - loop until a bit has changed to a specific state. + * @addr: base address of the area of interest. + * @offset: address of a register, starting from @addr. + * @position: the position of the bit of interest. + * @value: the value the bit should have. + * + * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if + * TIMEOUT_US has elapsed, which ever happens first. + */ + +int coresight_timeout(void __iomem *addr, u32 offset, int position, int value) +{ + int i; + u32 val; + + for (i = TIMEOUT_US; i > 0; i--) { + val = __raw_readl(addr + offset); + /* waiting on the bit to go from 0 to 1 */ + if (value) { + if (val & BIT(position)) + return 0; + /* waiting on the bit to go from 1 to 0 */ + } else { + if (!(val & BIT(position))) + return 0; + } + + /* + * Delay is arbitrary - the specification doesn't say how long + * we are expected to wait. Extra check required to make sure + * we don't wait needlessly on the last iteration. 
+ */ + if (i - 1) + udelay(1); + } + + return -EAGAIN; +} + +struct bus_type coresight_bustype = { + .name = "coresight", +}; + +static int __init coresight_init(void) +{ + return bus_register(&coresight_bustype); +} +postcore_initcall(coresight_init); + +struct coresight_device *coresight_register(struct coresight_desc *desc) +{ + int i; + int ret; + int link_subtype; + int nr_refcnts = 1; + atomic_t *refcnts = NULL; + struct coresight_device *csdev; + struct coresight_connection *conns; + + csdev = kzalloc(sizeof(*csdev), GFP_KERNEL); + if (!csdev) { + ret = -ENOMEM; + goto err_kzalloc_csdev; + } + + if (desc->type == CORESIGHT_DEV_TYPE_LINK || + desc->type == CORESIGHT_DEV_TYPE_LINKSINK) { + link_subtype = desc->subtype.link_subtype; + + if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) + nr_refcnts = desc->pdata->nr_inport; + else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) + nr_refcnts = desc->pdata->nr_outport; + } + + refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL); + if (!refcnts) { + ret = -ENOMEM; + goto err_kzalloc_refcnts; + } + + csdev->refcnt = refcnts; + + csdev->nr_inport = desc->pdata->nr_inport; + csdev->nr_outport = desc->pdata->nr_outport; + conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL); + if (!conns) { + ret = -ENOMEM; + goto err_kzalloc_conns; + } + + for (i = 0; i < csdev->nr_outport; i++) { + conns[i].outport = desc->pdata->outports[i]; + conns[i].child_name = desc->pdata->child_names[i]; + conns[i].child_port = desc->pdata->child_ports[i]; + } + + csdev->conns = conns; + + csdev->type = desc->type; + csdev->subtype = desc->subtype; + csdev->ops = desc->ops; + csdev->orphan = false; + + csdev->dev.type = &coresight_dev_type[desc->type]; + csdev->dev.groups = desc->groups; + csdev->dev.parent = desc->dev; + csdev->dev.release = coresight_device_release; + csdev->dev.bus = &coresight_bustype; + dev_set_name(&csdev->dev, "%s", desc->pdata->name); + + ret = device_register(&csdev->dev); + if (ret) + goto err_device_register; + + mutex_lock(&coresight_mutex); + + coresight_fixup_device_conns(csdev); + coresight_fixup_orphan_conns(csdev); + + mutex_unlock(&coresight_mutex); + + return csdev; + +err_device_register: + kfree(conns); +err_kzalloc_conns: + kfree(refcnts); +err_kzalloc_refcnts: + kfree(csdev); +err_kzalloc_csdev: + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(coresight_register); + +void coresight_unregister(struct coresight_device *csdev) +{ + mutex_lock(&coresight_mutex); + + kfree(csdev->conns); + device_unregister(&csdev->dev); + + mutex_unlock(&coresight_mutex); +} +EXPORT_SYMBOL_GPL(coresight_unregister); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c new file mode 100644 index 000000000000..5030c0734508 --- /dev/null +++ b/drivers/coresight/of_coresight.c @@ -0,0 +1,204 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int of_dev_node_match(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static struct device * +of_coresight_get_endpoint_device(struct device_node *endpoint) +{ + struct device *dev = NULL; + + /* + * If we have a non-configuable replicator, it will be found on the + * platform bus. + */ + dev = bus_find_device(&platform_bus_type, NULL, + endpoint, of_dev_node_match); + if (dev) + return dev; + + /* + * We have a configurable component - circle through the AMBA bus + * looking for the device that matches the endpoint node. + */ + return bus_find_device(&amba_bustype, NULL, + endpoint, of_dev_node_match); +} + +static struct device_node *of_get_coresight_endpoint( + const struct device_node *parent, struct device_node *prev) +{ + struct device_node *node = of_graph_get_next_endpoint(parent, prev); + + of_node_put(prev); + return node; +} + +static void of_coresight_get_ports(struct device_node *node, + int *nr_inport, int *nr_outport) +{ + struct device_node *ep = NULL; + int in = 0, out = 0; + + do { + ep = of_get_coresight_endpoint(node, ep); + if (!ep) + break; + + if (of_property_read_bool(ep, "slave-mode")) + in++; + else + out++; + + } while (ep); + + *nr_inport = in; + *nr_outport = out; +} + +static int of_coresight_alloc_memory(struct device *dev, + struct coresight_platform_data *pdata) +{ + /* List of output port on this component */ + pdata->outports = devm_kzalloc(dev, pdata->nr_outport * + sizeof(*pdata->outports), + GFP_KERNEL); + if (!pdata->outports) + return -ENOMEM; + + /* Children connected to this component via @outport */ + pdata->child_names = devm_kzalloc(dev, pdata->nr_outport * + sizeof(*pdata->child_names), + GFP_KERNEL); + if (!pdata->child_names) + return -ENOMEM; + + /* Port number on the child this component is connected to */ + pdata->child_ports = devm_kzalloc(dev, pdata->nr_outport * + sizeof(*pdata->child_ports), + GFP_KERNEL); + if (!pdata->child_ports) + return -ENOMEM; + + return 0; +} + +struct coresight_platform_data *of_get_coresight_platform_data( + struct device *dev, struct device_node *node) +{ + int i = 0, ret = 0; + struct coresight_platform_data *pdata; + struct of_endpoint endpoint, rendpoint; + struct device *rdev; + struct device_node *cpu; + struct device_node *ep = NULL; + struct device_node *rparent = NULL; + struct device_node *rport = NULL; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + /* Use device name as debugfs handle */ + pdata->name = dev_name(dev); + + /* Get the number of input and output port for this component */ + of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport); + + if (pdata->nr_outport) { + ret = of_coresight_alloc_memory(dev, pdata); + if (ret) + return ERR_PTR(ret); + + /* Iterate through each port to discover topology */ + do { + /* Get a handle on a port */ + ep = of_get_coresight_endpoint(node, ep); + if (!ep) + break; + + /* + * No need to deal with input ports, processing for as + * processing for output ports will deal with them. + */ + if (of_find_property(ep, "slave-mode", NULL)) + continue; + + /* Get a handle on the local endpoint */ + ret = of_graph_parse_endpoint(ep, &endpoint); + + if (ret) + continue; + + /* The local out port number */ + pdata->outports[i] = endpoint.id; + + /* + * Get a handle on the remote port and parent + * attached to it. 
+ */ + rparent = of_graph_get_remote_port_parent(ep); + rport = of_graph_get_remote_port(ep); + + if (!rparent || !rport) + continue; + + if (of_graph_parse_endpoint(rport, &rendpoint)) + continue; + + rdev = of_coresight_get_endpoint_device(rparent); + if (!dev) + continue; + + pdata->child_names[i] = dev_name(rdev); + pdata->child_ports[i] = rendpoint.id; + + i++; + } while (ep); + } + + /* Affinity defaults to CPU0 */ + pdata->cpu = 0; + cpu = of_parse_phandle(node, "cpu", 0); + if (cpu) { + const u32 *mpidr; + int len, index; + + mpidr = of_get_property(cpu, "reg", &len); + if (mpidr && len == 4) { + index = get_logical_index(be32_to_cpup(mpidr)); + if (index != -EINVAL) + pdata->cpu = index; + } + } + + return pdata; +} +EXPORT_SYMBOL_GPL(of_get_coresight_platform_data); diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index c324f5700d1a..d024bd9c9d9b 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -23,6 +23,7 @@ #define AMBA_NR_IRQS 9 #define AMBA_CID 0xb105f00d +#define CORESIGHT_CID 0xb105900d struct clk; diff --git a/include/linux/coresight.h b/include/linux/coresight.h new file mode 100644 index 000000000000..bdde4199c74a --- /dev/null +++ b/include/linux/coresight.h @@ -0,0 +1,263 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_CORESIGHT_H +#define _LINUX_CORESIGHT_H + +#include + +/* Peripheral id registers (0xFD0-0xFEC) */ +#define CORESIGHT_PERIPHIDR4 0xfd0 +#define CORESIGHT_PERIPHIDR5 0xfd4 +#define CORESIGHT_PERIPHIDR6 0xfd8 +#define CORESIGHT_PERIPHIDR7 0xfdC +#define CORESIGHT_PERIPHIDR0 0xfe0 +#define CORESIGHT_PERIPHIDR1 0xfe4 +#define CORESIGHT_PERIPHIDR2 0xfe8 +#define CORESIGHT_PERIPHIDR3 0xfeC +/* Component id registers (0xFF0-0xFFC) */ +#define CORESIGHT_COMPIDR0 0xff0 +#define CORESIGHT_COMPIDR1 0xff4 +#define CORESIGHT_COMPIDR2 0xff8 +#define CORESIGHT_COMPIDR3 0xffC + +#define ETM_ARCH_V3_3 0x23 +#define ETM_ARCH_V3_5 0x25 +#define PFT_ARCH_V1_0 0x30 +#define PFT_ARCH_V1_1 0x31 + +#define CORESIGHT_UNLOCK 0xc5acce55 + +extern struct bus_type coresight_bustype; + +enum coresight_dev_type { + CORESIGHT_DEV_TYPE_NONE, + CORESIGHT_DEV_TYPE_SINK, + CORESIGHT_DEV_TYPE_LINK, + CORESIGHT_DEV_TYPE_LINKSINK, + CORESIGHT_DEV_TYPE_SOURCE, +}; + +enum coresight_dev_subtype_sink { + CORESIGHT_DEV_SUBTYPE_SINK_NONE, + CORESIGHT_DEV_SUBTYPE_SINK_PORT, + CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, +}; + +enum coresight_dev_subtype_link { + CORESIGHT_DEV_SUBTYPE_LINK_NONE, + CORESIGHT_DEV_SUBTYPE_LINK_MERG, + CORESIGHT_DEV_SUBTYPE_LINK_SPLIT, + CORESIGHT_DEV_SUBTYPE_LINK_FIFO, +}; + +enum coresight_dev_subtype_source { + CORESIGHT_DEV_SUBTYPE_SOURCE_NONE, + CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, + CORESIGHT_DEV_SUBTYPE_SOURCE_BUS, + CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, +}; + +/** + * struct coresight_dev_subtype - further characterisation of a type + * @sink_subtype: type of sink this component is, as defined + by @coresight_dev_subtype_sink. + * @link_subtype: type of link this component is, as defined + by @coresight_dev_subtype_link. 
+ * @source_subtype: type of source this component is, as defined + by @coresight_dev_subtype_source. + */ +struct coresight_dev_subtype { + enum coresight_dev_subtype_sink sink_subtype; + enum coresight_dev_subtype_link link_subtype; + enum coresight_dev_subtype_source source_subtype; +}; + +/** + * struct coresight_platform_data - data harvested from the DT specification + * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. + * @name: name of the component as shown under sysfs. + * @nr_inport: number of input ports for this component. + * @outports: list of remote enpoint port number. + * @child_names:name of all child components connected to this device. + * @child_ports:child component port number the current component is + connected to. + * @nr_outport: number of output ports for this component. + * @clk: The clock this component is associated to. + */ +struct coresight_platform_data { + int cpu; + const char *name; + int nr_inport; + int *outports; + const char **child_names; + int *child_ports; + int nr_outport; + struct clk *clk; +}; + +/** + * struct coresight_desc - description of a component required from drivers + * @type: as defined by @coresight_dev_type. + * @subtype: as defined by @coresight_dev_subtype. + * @ops: generic operations for this component, as defined + by @coresight_ops. + * @pdata: platform data collected from DT. + * @dev: The device entity associated to this component. + * @groups :operations specific to this component. These will end up + in the component's sysfs sub-directory. + */ +struct coresight_desc { + enum coresight_dev_type type; + struct coresight_dev_subtype subtype; + const struct coresight_ops *ops; + struct coresight_platform_data *pdata; + struct device *dev; + const struct attribute_group **groups; +}; + +/** + * struct coresight_connection - representation of a single connection + * @ref_count: keeping count a port' references. + * @outport: a connection's output port number. + * @chid_name: remote component's name. + * @child_port: remote component's port number @output is connected to. + * @child_dev: a @coresight_device representation of the component + connected to @outport. + */ +struct coresight_connection { + int outport; + const char *child_name; + int child_port; + struct coresight_device *child_dev; +}; + +/** + * struct coresight_device - representation of a device as used by the framework + * @nr_inport: number of input port associated to this component. + * @nr_outport: number of output port associated to this component. + * @type: as defined by @coresight_dev_type. + * @subtype: as defined by @coresight_dev_subtype. + * @ops: generic operations for this component, as defined + by @coresight_ops. + * @dev: The device entity associated to this component. + * @refcnt: keep track of what is in use. + * @path_link: link of current component into the path being enabled. + * @orphan: true if the component has connections that haven't been linked. + * @enable: 'true' if component is currently part of an active path. + * @activated: 'true' only if a _sink_ has been activated. A sink can be + activated but not yet enabled. Enabling for a _sink_ + happens when a source has been selected for that it. 
+ */ +struct coresight_device { + struct coresight_connection *conns; + int nr_inport; + int nr_outport; + enum coresight_dev_type type; + struct coresight_dev_subtype subtype; + const struct coresight_ops *ops; + struct device dev; + atomic_t *refcnt; + struct list_head path_link; + bool orphan; + bool enable; /* true only if configured as part of a path */ + bool activated; /* true only if a sink is part of a path */ +}; + +#define to_coresight_device(d) container_of(d, struct coresight_device, dev) + +#define source_ops(csdev) csdev->ops->source_ops +#define sink_ops(csdev) csdev->ops->sink_ops +#define link_ops(csdev) csdev->ops->link_ops + +#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \ + __mode, __get, __set, __fmt) \ +DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \ +static const struct coresight_ops_entry __name ## _entry = { \ + .name = __entry_name, \ + .mode = __mode, \ + .ops = &__name ## _ops \ +} + +/** + * struct coresight_ops_sink - basic operations for a sink + * Operations available for sinks + * @enable: enables the sink. + * @disable: disables the sink. + */ +struct coresight_ops_sink { + int (*enable)(struct coresight_device *csdev); + void (*disable)(struct coresight_device *csdev); +}; + +/** + * struct coresight_ops_link - basic operations for a link + * Operations available for links. + * @enable: enables flow between iport and oport. + * @disable: disables flow between iport and oport. + */ +struct coresight_ops_link { + int (*enable)(struct coresight_device *csdev, int iport, int oport); + void (*disable)(struct coresight_device *csdev, int iport, int oport); +}; + +/** + * struct coresight_ops_source - basic operations for a source + * Operations available for sources. + * @trace_id: returns the value of the component's trace ID as known + to the HW. + * @enable: enables tracing from a source. + * @disable: disables tracing for a source. 
+ */ +struct coresight_ops_source { + int (*trace_id)(struct coresight_device *csdev); + int (*enable)(struct coresight_device *csdev); + void (*disable)(struct coresight_device *csdev); +}; + +struct coresight_ops { + const struct coresight_ops_sink *sink_ops; + const struct coresight_ops_link *link_ops; + const struct coresight_ops_source *source_ops; +}; + +#ifdef CONFIG_CORESIGHT +extern struct coresight_device * +coresight_register(struct coresight_desc *desc); +extern void coresight_unregister(struct coresight_device *csdev); +extern int coresight_enable(struct coresight_device *csdev); +extern void coresight_disable(struct coresight_device *csdev); +extern int coresight_is_bit_set(u32 val, int position, int value); +extern int coresight_timeout(void __iomem *addr, u32 offset, + int position, int value); +#ifdef CONFIG_OF +extern struct coresight_platform_data *of_get_coresight_platform_data( + struct device *dev, struct device_node *node); +#endif +#else +static inline struct coresight_device * +coresight_register(struct coresight_desc *desc) { return NULL; } +static inline void coresight_unregister(struct coresight_device *csdev) {} +static inline int +coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } +static inline void coresight_disable(struct coresight_device *csdev) {} +static inline int coresight_is_bit_set(u32 val, int position, int value) + { return 0; } +static inline int coresight_timeout(void __iomem *addr, u32 offset, + int position, int value) { return 1; } +#ifdef CONFIG_OF +static inline struct coresight_platform_data *of_get_coresight_platform_data( + struct device *dev, struct device_node *node) { return NULL; } +#endif +#endif + +#endif -- cgit v1.2.3-59-g8ed1b From fbace43e8817113475ebda00e28593baa436a131 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Sat, 8 Nov 2014 14:40:17 +0100 Subject: ASoC: tfa9879: New driver for NXP Semiconductors TFA9879 amplifier. 
Signed-off-by: Peter Rosin Signed-off-by: Mark Brown --- MAINTAINERS | 6 + sound/soc/codecs/Kconfig | 5 + sound/soc/codecs/Makefile | 2 + sound/soc/codecs/tfa9879.c | 328 +++++++++++++++++++++++++++++++++++++++++++++ sound/soc/codecs/tfa9879.h | 202 ++++++++++++++++++++++++++++ 5 files changed, 543 insertions(+) create mode 100644 sound/soc/codecs/tfa9879.c create mode 100644 sound/soc/codecs/tfa9879.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..28a33296a468 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6565,6 +6565,12 @@ S: Supported F: drivers/gpu/drm/i2c/tda998x_drv.c F: include/drm/i2c/tda998x.h +NXP TFA9879 DRIVER +M: Peter Rosin +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +S: Maintained +F: sound/soc/codecs/tfa9879* + OMAP SUPPORT M: Tony Lindgren L: linux-omap@vger.kernel.org diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index a68d1731a8fd..99b34a97499f 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -101,6 +101,7 @@ config SND_SOC_ALL_CODECS select SND_SOC_STAC9766 if SND_SOC_AC97_BUS select SND_SOC_TAS2552 if I2C select SND_SOC_TAS5086 if I2C + select SND_SOC_TFA9879 if I2C select SND_SOC_TLV320AIC23_I2C if I2C select SND_SOC_TLV320AIC23_SPI if SPI_MASTER select SND_SOC_TLV320AIC26 if SPI_MASTER @@ -577,6 +578,10 @@ config SND_SOC_TAS5086 tristate "Texas Instruments TAS5086 speaker amplifier" depends on I2C +config SND_SOC_TFA9879 + tristate "NXP Semiconductors TFA9879 amplifier" + depends on I2C + config SND_SOC_TLV320AIC23 tristate diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 5dce451661e4..ccfa2ab158be 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -101,6 +101,7 @@ snd-soc-sta350-objs := sta350.o snd-soc-sta529-objs := sta529.o snd-soc-stac9766-objs := stac9766.o snd-soc-tas5086-objs := tas5086.o +snd-soc-tfa9879-objs := tfa9879.o snd-soc-tlv320aic23-objs := tlv320aic23.o snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o snd-soc-tlv320aic23-spi-objs := tlv320aic23-spi.o @@ -274,6 +275,7 @@ obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o +obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o obj-$(CONFIG_SND_SOC_TLV320AIC23_SPI) += snd-soc-tlv320aic23-spi.o diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c new file mode 100644 index 000000000000..16f1b71edb55 --- /dev/null +++ b/sound/soc/codecs/tfa9879.c @@ -0,0 +1,328 @@ +/* + * tfa9879.c -- driver for NXP Semiconductors TFA9879 + * + * Copyright (C) 2014 Axentia Technologies AB + * Author: Peter Rosin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tfa9879.h" + +struct tfa9879_priv { + struct regmap *regmap; + int lsb_justified; +}; + +static int tfa9879_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec = dai->codec; + struct tfa9879_priv *tfa9879 = snd_soc_codec_get_drvdata(codec); + int fs; + int i2s_set = 0; + + switch (params_rate(params)) { + case 8000: + fs = TFA9879_I2S_FS_8000; + break; + case 11025: + fs = TFA9879_I2S_FS_11025; + break; + case 12000: + fs = TFA9879_I2S_FS_12000; + break; + case 16000: + fs = TFA9879_I2S_FS_16000; + break; + case 22050: + fs = TFA9879_I2S_FS_22050; + break; + case 24000: + fs = TFA9879_I2S_FS_24000; + break; + case 32000: + fs = TFA9879_I2S_FS_32000; + break; + case 44100: + fs = TFA9879_I2S_FS_44100; + break; + case 48000: + fs = TFA9879_I2S_FS_48000; + break; + case 64000: + fs = TFA9879_I2S_FS_64000; + break; + case 88200: + fs = TFA9879_I2S_FS_88200; + break; + case 96000: + fs = TFA9879_I2S_FS_96000; + break; + default: + return -EINVAL; + } + + switch (params_width(params)) { + case 16: + i2s_set = TFA9879_I2S_SET_LSB_J_16; + break; + case 24: + i2s_set = TFA9879_I2S_SET_LSB_J_24; + break; + default: + return -EINVAL; + } + + if (tfa9879->lsb_justified) + snd_soc_update_bits(codec, TFA9879_SERIAL_INTERFACE_1, + TFA9879_I2S_SET_MASK, + i2s_set << TFA9879_I2S_SET_SHIFT); + + snd_soc_update_bits(codec, TFA9879_SERIAL_INTERFACE_1, + TFA9879_I2S_FS_MASK, + fs << TFA9879_I2S_FS_SHIFT); + return 0; +} + +static int tfa9879_digital_mute(struct snd_soc_dai *dai, int mute) +{ + struct snd_soc_codec *codec = dai->codec; + + snd_soc_update_bits(codec, TFA9879_MISC_CONTROL, + TFA9879_S_MUTE_MASK, + !!mute << TFA9879_S_MUTE_SHIFT); + + return 0; +} + +static int tfa9879_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) +{ + struct snd_soc_codec *codec = dai->codec; + struct tfa9879_priv *tfa9879 = snd_soc_codec_get_drvdata(codec); + int i2s_set; + int sck_pol; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBS_CFS: + break; + default: + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + sck_pol = TFA9879_SCK_POL_NORMAL; + break; + case SND_SOC_DAIFMT_IB_NF: + sck_pol = TFA9879_SCK_POL_INVERSE; + break; + default: + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + tfa9879->lsb_justified = 0; + i2s_set = TFA9879_I2S_SET_I2S_24; + break; + case SND_SOC_DAIFMT_LEFT_J: + tfa9879->lsb_justified = 0; + i2s_set = TFA9879_I2S_SET_MSB_J_24; + break; + case SND_SOC_DAIFMT_RIGHT_J: + tfa9879->lsb_justified = 1; + i2s_set = TFA9879_I2S_SET_LSB_J_24; + break; + default: + return -EINVAL; + } + + snd_soc_update_bits(codec, TFA9879_SERIAL_INTERFACE_1, + TFA9879_SCK_POL_MASK, + sck_pol << TFA9879_SCK_POL_SHIFT); + snd_soc_update_bits(codec, TFA9879_SERIAL_INTERFACE_1, + TFA9879_I2S_SET_MASK, + i2s_set << TFA9879_I2S_SET_SHIFT); + return 0; +} + +static struct reg_default tfa9879_regs[] = { + { TFA9879_DEVICE_CONTROL, 0x0000 }, /* 0x00 */ + { TFA9879_SERIAL_INTERFACE_1, 0x0a18 }, /* 0x01 */ + { TFA9879_PCM_IOM2_FORMAT_1, 0x0007 }, /* 0x02 */ + { TFA9879_SERIAL_INTERFACE_2, 0x0a18 }, /* 0x03 */ + { TFA9879_PCM_IOM2_FORMAT_2, 0x0007 }, /* 0x04 */ + { TFA9879_EQUALIZER_A1, 0x59dd }, /* 0x05 */ + { TFA9879_EQUALIZER_A2, 0xc63e }, /* 0x06 */ + { TFA9879_EQUALIZER_B1, 0x651a }, /* 0x07 */ + { TFA9879_EQUALIZER_B2, 0xe53e }, 
/* 0x08 */ + { TFA9879_EQUALIZER_C1, 0x4616 }, /* 0x09 */ + { TFA9879_EQUALIZER_C2, 0xd33e }, /* 0x0a */ + { TFA9879_EQUALIZER_D1, 0x4df3 }, /* 0x0b */ + { TFA9879_EQUALIZER_D2, 0xea3e }, /* 0x0c */ + { TFA9879_EQUALIZER_E1, 0x5ee0 }, /* 0x0d */ + { TFA9879_EQUALIZER_E2, 0xf93e }, /* 0x0e */ + { TFA9879_BYPASS_CONTROL, 0x0093 }, /* 0x0f */ + { TFA9879_DYNAMIC_RANGE_COMPR, 0x92ba }, /* 0x10 */ + { TFA9879_BASS_TREBLE, 0x12a5 }, /* 0x11 */ + { TFA9879_HIGH_PASS_FILTER, 0x0004 }, /* 0x12 */ + { TFA9879_VOLUME_CONTROL, 0x10bd }, /* 0x13 */ + { TFA9879_MISC_CONTROL, 0x0000 }, /* 0x14 */ +}; + +static bool tfa9879_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == TFA9879_MISC_STATUS; +} + +static const DECLARE_TLV_DB_SCALE(volume_tlv, -7050, 50, 1); +static const DECLARE_TLV_DB_SCALE(tb_gain_tlv, -1800, 200, 0); +static const char * const tb_freq_text[] = { + "Low", "Mid", "High" +}; +static const struct soc_enum treble_freq_enum = + SOC_ENUM_SINGLE(TFA9879_BASS_TREBLE, TFA9879_F_TRBLE_SHIFT, + ARRAY_SIZE(tb_freq_text), tb_freq_text); +static const struct soc_enum bass_freq_enum = + SOC_ENUM_SINGLE(TFA9879_BASS_TREBLE, TFA9879_F_BASS_SHIFT, + ARRAY_SIZE(tb_freq_text), tb_freq_text); + +static const struct snd_kcontrol_new tfa9879_controls[] = { + SOC_SINGLE_TLV("PCM Playback Volume", TFA9879_VOLUME_CONTROL, + TFA9879_VOL_SHIFT, 0xbd, 1, volume_tlv), + SOC_SINGLE_TLV("Treble Volume", TFA9879_BASS_TREBLE, + TFA9879_G_TRBLE_SHIFT, 18, 0, tb_gain_tlv), + SOC_SINGLE_TLV("Bass Volume", TFA9879_BASS_TREBLE, + TFA9879_G_BASS_SHIFT, 18, 0, tb_gain_tlv), + SOC_ENUM("Treble Corner Freq", treble_freq_enum), + SOC_ENUM("Bass Corner Freq", bass_freq_enum), +}; + +static const struct snd_soc_dapm_widget tfa9879_dapm_widgets[] = { +SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0), +SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0), +SND_SOC_DAPM_DAC("DAC", NULL, TFA9879_DEVICE_CONTROL, TFA9879_OPMODE_SHIFT, 0), +SND_SOC_DAPM_OUTPUT("LINEOUT"), +SND_SOC_DAPM_SUPPLY("POWER", TFA9879_DEVICE_CONTROL, TFA9879_POWERUP_SHIFT, 0, + NULL, 0), +}; + +static const struct snd_soc_dapm_route tfa9879_dapm_routes[] = { + { "DAC", NULL, "AIFINL" }, + { "DAC", NULL, "AIFINR" }, + + { "LINEOUT", NULL, "DAC" }, + + { "DAC", NULL, "POWER" }, +}; + +static const struct snd_soc_codec_driver tfa9879_codec = { + .controls = tfa9879_controls, + .num_controls = ARRAY_SIZE(tfa9879_controls), + + .dapm_widgets = tfa9879_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(tfa9879_dapm_widgets), + .dapm_routes = tfa9879_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(tfa9879_dapm_routes), +}; + +static const struct regmap_config tfa9879_regmap = { + .reg_bits = 8, + .val_bits = 16, + + .volatile_reg = tfa9879_volatile_reg, + .max_register = TFA9879_MISC_STATUS, + .reg_defaults = tfa9879_regs, + .num_reg_defaults = ARRAY_SIZE(tfa9879_regs), + .cache_type = REGCACHE_RBTREE, +}; + +static const struct snd_soc_dai_ops tfa9879_dai_ops = { + .hw_params = tfa9879_hw_params, + .digital_mute = tfa9879_digital_mute, + .set_fmt = tfa9879_set_fmt, +}; + +#define TFA9879_RATES SNDRV_PCM_RATE_8000_96000 + +#define TFA9879_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S24_LE) + +static struct snd_soc_dai_driver tfa9879_dai = { + .name = "tfa9879-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 2, + .rates = TFA9879_RATES, + .formats = TFA9879_FORMATS, }, + .ops = &tfa9879_dai_ops, +}; + +static int tfa9879_i2c_probe(struct i2c_client *i2c, + const struct 
i2c_device_id *id) +{ + struct tfa9879_priv *tfa9879; + int i; + + tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL); + if (IS_ERR(tfa9879)) + return PTR_ERR(tfa9879); + + i2c_set_clientdata(i2c, tfa9879); + + tfa9879->regmap = devm_regmap_init_i2c(i2c, &tfa9879_regmap); + if (IS_ERR(tfa9879->regmap)) + return PTR_ERR(tfa9879->regmap); + + /* Ensure the device is in reset state */ + for (i = 0; i < ARRAY_SIZE(tfa9879_regs); i++) + regmap_write(tfa9879->regmap, + tfa9879_regs[i].reg, tfa9879_regs[i].def); + + return snd_soc_register_codec(&i2c->dev, &tfa9879_codec, + &tfa9879_dai, 1); +} + +static int tfa9879_i2c_remove(struct i2c_client *client) +{ + snd_soc_unregister_codec(&client->dev); + + return 0; +} + +static const struct i2c_device_id tfa9879_i2c_id[] = { + { "tfa9879", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tfa9879_i2c_id); + +static struct i2c_driver tfa9879_i2c_driver = { + .driver = { + .name = "tfa9879", + .owner = THIS_MODULE, + }, + .probe = tfa9879_i2c_probe, + .remove = tfa9879_i2c_remove, + .id_table = tfa9879_i2c_id, +}; + +module_i2c_driver(tfa9879_i2c_driver); + +MODULE_DESCRIPTION("ASoC NXP Semiconductors TFA9879 driver"); +MODULE_AUTHOR("Peter Rosin "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/tfa9879.h b/sound/soc/codecs/tfa9879.h new file mode 100644 index 000000000000..3408c90c4628 --- /dev/null +++ b/sound/soc/codecs/tfa9879.h @@ -0,0 +1,202 @@ +/* + * tfa9879.h -- driver for NXP Semiconductors TFA9879 + * + * Copyright (C) 2014 Axentia Technologies AB + * Author: Peter Rosin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef _TFA9879_H +#define _TFA9879_H + +#define TFA9879_DEVICE_CONTROL 0x00 +#define TFA9879_SERIAL_INTERFACE_1 0x01 +#define TFA9879_PCM_IOM2_FORMAT_1 0x02 +#define TFA9879_SERIAL_INTERFACE_2 0x03 +#define TFA9879_PCM_IOM2_FORMAT_2 0x04 +#define TFA9879_EQUALIZER_A1 0x05 +#define TFA9879_EQUALIZER_A2 0x06 +#define TFA9879_EQUALIZER_B1 0x07 +#define TFA9879_EQUALIZER_B2 0x08 +#define TFA9879_EQUALIZER_C1 0x09 +#define TFA9879_EQUALIZER_C2 0x0a +#define TFA9879_EQUALIZER_D1 0x0b +#define TFA9879_EQUALIZER_D2 0x0c +#define TFA9879_EQUALIZER_E1 0x0d +#define TFA9879_EQUALIZER_E2 0x0e +#define TFA9879_BYPASS_CONTROL 0x0f +#define TFA9879_DYNAMIC_RANGE_COMPR 0x10 +#define TFA9879_BASS_TREBLE 0x11 +#define TFA9879_HIGH_PASS_FILTER 0x12 +#define TFA9879_VOLUME_CONTROL 0x13 +#define TFA9879_MISC_CONTROL 0x14 +#define TFA9879_MISC_STATUS 0x15 + +/* TFA9879_DEVICE_CONTROL */ +#define TFA9879_INPUT_SEL_MASK 0x0010 +#define TFA9879_INPUT_SEL_SHIFT 4 +#define TFA9879_OPMODE_MASK 0x0008 +#define TFA9879_OPMODE_SHIFT 3 +#define TFA9879_RESET_MASK 0x0002 +#define TFA9879_RESET_SHIFT 1 +#define TFA9879_POWERUP_MASK 0x0001 +#define TFA9879_POWERUP_SHIFT 0 + +/* TFA9879_SERIAL_INTERFACE */ +#define TFA9879_MONO_SEL_MASK 0x0c00 +#define TFA9879_MONO_SEL_SHIFT 10 +#define TFA9879_MONO_SEL_LEFT 0 +#define TFA9879_MONO_SEL_RIGHT 1 +#define TFA9879_MONO_SEL_BOTH 2 +#define TFA9879_I2S_FS_MASK 0x03c0 +#define TFA9879_I2S_FS_SHIFT 6 +#define TFA9879_I2S_FS_8000 0 +#define TFA9879_I2S_FS_11025 1 +#define TFA9879_I2S_FS_12000 2 +#define TFA9879_I2S_FS_16000 3 +#define TFA9879_I2S_FS_22050 4 +#define TFA9879_I2S_FS_24000 5 +#define TFA9879_I2S_FS_32000 6 +#define TFA9879_I2S_FS_44100 7 +#define TFA9879_I2S_FS_48000 8 +#define TFA9879_I2S_FS_64000 9 +#define TFA9879_I2S_FS_88200 10 +#define TFA9879_I2S_FS_96000 11 +#define TFA9879_I2S_SET_MASK 0x0038 +#define TFA9879_I2S_SET_SHIFT 3 +#define TFA9879_I2S_SET_MSB_J_24 2 +#define TFA9879_I2S_SET_I2S_24 3 +#define TFA9879_I2S_SET_LSB_J_16 4 +#define TFA9879_I2S_SET_LSB_J_18 5 +#define TFA9879_I2S_SET_LSB_J_20 6 +#define TFA9879_I2S_SET_LSB_J_24 7 +#define TFA9879_SCK_POL_MASK 0x0004 +#define TFA9879_SCK_POL_SHIFT 2 +#define TFA9879_SCK_POL_NORMAL 0 +#define TFA9879_SCK_POL_INVERSE 1 +#define TFA9879_I_MODE_MASK 0x0003 +#define TFA9879_I_MODE_SHIFT 0 +#define TFA9879_I_MODE_I2S 0 +#define TFA9879_I_MODE_PCM_IOM2_SHORT 1 +#define TFA9879_I_MODE_PCM_IOM2_LONG 2 + +/* TFA9879_PCM_IOM2_FORMAT */ +#define TFA9879_PCM_FS_MASK 0x0800 +#define TFA9879_PCM_FS_SHIFT 11 +#define TFA9879_A_LAW_MASK 0x0400 +#define TFA9879_A_LAW_SHIFT 10 +#define TFA9879_PCM_COMP_MASK 0x0200 +#define TFA9879_PCM_COMP_SHIFT 9 +#define TFA9879_PCM_DL_MASK 0x0100 +#define TFA9879_PCM_DL_SHIFT 8 +#define TFA9879_D1_SLOT_MASK 0x00f0 +#define TFA9879_D1_SLOT_SHIFT 4 +#define TFA9879_D2_SLOT_MASK 0x000f +#define TFA9879_D2_SLOT_SHIFT 0 + +/* TFA9879_EQUALIZER_X1 */ +#define TFA9879_T1_MASK 0x8000 +#define TFA9879_T1_SHIFT 15 +#define TFA9879_K1M_MASK 0x7ff0 +#define TFA9879_K1M_SHIFT 4 +#define TFA9879_K1E_MASK 0x000f +#define TFA9879_K1E_SHIFT 0 + +/* TFA9879_EQUALIZER_X2 */ +#define TFA9879_T2_MASK 0x8000 +#define TFA9879_T2_SHIFT 15 +#define TFA9879_K2M_MASK 0x7800 +#define TFA9879_K2M_SHIFT 11 +#define TFA9879_K2E_MASK 0x0700 +#define TFA9879_K2E_SHIFT 8 +#define TFA9879_K0_MASK 0x00fe +#define TFA9879_K0_SHIFT 1 +#define TFA9879_S_MASK 0x0001 +#define TFA9879_S_SHIFT 0 + +/* TFA9879_BYPASS_CONTROL */ +#define TFA9879_L_OCP_MASK 0x00c0 +#define TFA9879_L_OCP_SHIFT 6 +#define 
TFA9879_L_OTP_MASK 0x0030 +#define TFA9879_L_OTP_SHIFT 4 +#define TFA9879_CLIPCTRL_MASK 0x0008 +#define TFA9879_CLIPCTRL_SHIFT 3 +#define TFA9879_HPF_BP_MASK 0x0004 +#define TFA9879_HPF_BP_SHIFT 2 +#define TFA9879_DRC_BP_MASK 0x0002 +#define TFA9879_DRC_BP_SHIFT 1 +#define TFA9879_EQ_BP_MASK 0x0001 +#define TFA9879_EQ_BP_SHIFT 0 + +/* TFA9879_DYNAMIC_RANGE_COMPR */ +#define TFA9879_AT_LVL_MASK 0xf000 +#define TFA9879_AT_LVL_SHIFT 12 +#define TFA9879_AT_RATE_MASK 0x0f00 +#define TFA9879_AT_RATE_SHIFT 8 +#define TFA9879_RL_LVL_MASK 0x00f0 +#define TFA9879_RL_LVL_SHIFT 4 +#define TFA9879_RL_RATE_MASK 0x000f +#define TFA9879_RL_RATE_SHIFT 0 + +/* TFA9879_BASS_TREBLE */ +#define TFA9879_G_TRBLE_MASK 0x3e00 +#define TFA9879_G_TRBLE_SHIFT 9 +#define TFA9879_F_TRBLE_MASK 0x0180 +#define TFA9879_F_TRBLE_SHIFT 7 +#define TFA9879_G_BASS_MASK 0x007c +#define TFA9879_G_BASS_SHIFT 2 +#define TFA9879_F_BASS_MASK 0x0003 +#define TFA9879_F_BASS_SHIFT 0 + +/* TFA9879_HIGH_PASS_FILTER */ +#define TFA9879_HP_CTRL_MASK 0x00ff +#define TFA9879_HP_CTRL_SHIFT 0 + +/* TFA9879_VOLUME_CONTROL */ +#define TFA9879_ZR_CRSS_MASK 0x1000 +#define TFA9879_ZR_CRSS_SHIFT 12 +#define TFA9879_VOL_MASK 0x00ff +#define TFA9879_VOL_SHIFT 0 + +/* TFA9879_MISC_CONTROL */ +#define TFA9879_DE_PHAS_MASK 0x0c00 +#define TFA9879_DE_PHAS_SHIFT 10 +#define TFA9879_H_MUTE_MASK 0x0200 +#define TFA9879_H_MUTE_SHIFT 9 +#define TFA9879_S_MUTE_MASK 0x0100 +#define TFA9879_S_MUTE_SHIFT 8 +#define TFA9879_P_LIM_MASK 0x00ff +#define TFA9879_P_LIM_SHIFT 0 + +/* TFA9879_MISC_STATUS */ +#define TFA9879_PS_MASK 0x4000 +#define TFA9879_PS_SHIFT 14 +#define TFA9879_PORA_MASK 0x2000 +#define TFA9879_PORA_SHIFT 13 +#define TFA9879_AMP_MASK 0x0600 +#define TFA9879_AMP_SHIFT 9 +#define TFA9879_IBP_2_MASK 0x0100 +#define TFA9879_IBP_2_SHIFT 8 +#define TFA9879_OFP_2_MASK 0x0080 +#define TFA9879_OFP_2_SHIFT 7 +#define TFA9879_UFP_2_MASK 0x0040 +#define TFA9879_UFP_2_SHIFT 6 +#define TFA9879_IBP_1_MASK 0x0020 +#define TFA9879_IBP_1_SHIFT 5 +#define TFA9879_OFP_1_MASK 0x0010 +#define TFA9879_OFP_1_SHIFT 4 +#define TFA9879_UFP_1_MASK 0x0008 +#define TFA9879_UFP_1_SHIFT 3 +#define TFA9879_OCPOKA_MASK 0x0004 +#define TFA9879_OCPOKA_SHIFT 2 +#define TFA9879_OCPOKB_MASK 0x0002 +#define TFA9879_OCPOKB_SHIFT 1 +#define TFA9879_OTPOK_MASK 0x0001 +#define TFA9879_OTPOK_SHIFT 0 + +#endif -- cgit v1.2.3-59-g8ed1b From 3ae75e02c34b5b8d521b0470522e540512ce24e3 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 9 Nov 2014 08:36:52 +0100 Subject: ieee802154: add new nl802154 header This patch adds the new userspace header for nl802154. We don't place this header in include/uapi now. This header could be modified in the next time. 
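For illustration, a minimal libnl-3 consumer of the interface described by this header might look like the sketch below. Because the header is deliberately not exported via uapi yet, the command value is copied by hand from the enum in the diff; the family name "nl802154" comes from NL802154_GENL_NAME, and the sketch assumes a later patch actually registers that generic netlink family.

/*
 * Illustrative userspace sketch only; not part of this patch.
 * Build (assumed): cc -I/usr/include/libnl3 example.c -lnl-3 -lnl-genl-3
 */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Value copied from enum nl802154_commands below (header not in uapi). */
#define EXAMPLE_NL802154_CMD_GET_INTERFACE	5

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = NULL;
	int family, ret = 1;

	if (!sk)
		return 1;
	if (genl_connect(sk) < 0)
		goto out;

	family = genl_ctrl_resolve(sk, "nl802154");
	if (family < 0)
		goto out;

	msg = nlmsg_alloc();
	if (!msg)
		goto out;

	/* Ask the kernel to dump all known WPAN interfaces. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, NLM_F_DUMP,
		    EXAMPLE_NL802154_CMD_GET_INTERFACE, 0);
	if (nl_send_auto(sk, msg) < 0)
		goto out;

	ret = nl_recvmsgs_default(sk) < 0;
out:
	nlmsg_free(msg);
	nl_socket_free(sk);
	return ret;
}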
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- MAINTAINERS | 1 + include/net/nl802154.h | 122 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 123 insertions(+) create mode 100644 include/net/nl802154.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index b42eb50b7426..7ec37a396ffe 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4699,6 +4699,7 @@ F: net/mac802154/ F: drivers/net/ieee802154/ F: include/linux/nl802154.h F: include/linux/ieee802154.h +F: include/net/nl802154.h F: include/net/mac802154.h F: include/net/af_ieee802154.h F: include/net/cfg802154.h diff --git a/include/net/nl802154.h b/include/net/nl802154.h new file mode 100644 index 000000000000..6dbd406ca41b --- /dev/null +++ b/include/net/nl802154.h @@ -0,0 +1,122 @@ +#ifndef __NL802154_H +#define __NL802154_H +/* + * 802.15.4 netlink interface public header + * + * Copyright 2014 Alexander Aring + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#define NL802154_GENL_NAME "nl802154" + +enum nl802154_commands { +/* don't change the order or add anything between, this is ABI! */ +/* currently we don't shipping this file via uapi, ignore the above one */ + NL802154_CMD_UNSPEC, + + NL802154_CMD_GET_WPAN_PHY, /* can dump */ + NL802154_CMD_SET_WPAN_PHY, + NL802154_CMD_NEW_WPAN_PHY, + NL802154_CMD_DEL_WPAN_PHY, + + NL802154_CMD_GET_INTERFACE, /* can dump */ + NL802154_CMD_SET_INTERFACE, + NL802154_CMD_NEW_INTERFACE, + NL802154_CMD_DEL_INTERFACE, + + NL802154_CMD_SET_CHANNEL, + + NL802154_CMD_SET_PAN_ID, + NL802154_CMD_SET_SHORT_ADDR, + + NL802154_CMD_SET_TX_POWER, + NL802154_CMD_SET_CCA_MODE, + NL802154_CMD_SET_CCA_ED_LEVEL, + + NL802154_CMD_SET_MAX_FRAME_RETRIES, + + NL802154_CMD_SET_BACKOFF_EXPONENT, + NL802154_CMD_SET_MAX_CSMA_BACKOFFS, + + NL802154_CMD_SET_LBT_MODE, + + /* add new commands above here */ + + /* used to define NL802154_CMD_MAX below */ + __NL802154_CMD_AFTER_LAST, + NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1 +}; + +enum nl802154_attrs { +/* don't change the order or add anything between, this is ABI! 
*/ +/* currently we don't shipping this file via uapi, ignore the above one */ + NL802154_ATTR_UNSPEC, + + NL802154_ATTR_WPAN_PHY, + NL802154_ATTR_WPAN_PHY_NAME, + + NL802154_ATTR_IFINDEX, + NL802154_ATTR_IFNAME, + NL802154_ATTR_IFTYPE, + + NL802154_ATTR_WPAN_DEV, + + NL802154_ATTR_PAGE, + NL802154_ATTR_CHANNEL, + + NL802154_ATTR_PAN_ID, + NL802154_ATTR_SHORT_ADDR, + + NL802154_ATTR_TX_POWER, + + NL802154_ATTR_CCA_MODE, + NL802154_ATTR_CCA_MODE3_AND, + NL802154_ATTR_CCA_ED_LEVEL, + + NL802154_ATTR_MAX_FRAME_RETRIES, + + NL802154_ATTR_MAX_BE, + NL802154_ATTR_MIN_BE, + NL802154_ATTR_MAX_CSMA_BACKOFFS, + + NL802154_ATTR_LBT_MODE, + + NL802154_ATTR_GENERATION, + + NL802154_ATTR_CHANNELS_SUPPORTED, + NL802154_ATTR_SUPPORTED_CHANNEL, + + NL802154_ATTR_EXTENDED_ADDR, + + /* add attributes here, update the policy in nl802154.c */ + + __NL802154_ATTR_AFTER_LAST, + NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1 +}; + +enum nl802154_iftype { + /* for backwards compatibility TODO */ + NL802154_IFTYPE_UNSPEC = -1, + + NL802154_IFTYPE_NODE, + NL802154_IFTYPE_MONITOR, + NL802154_IFTYPE_COORD, + + /* keep last */ + NUM_NL802154_IFTYPES, + NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1 +}; + +#endif /* __NL802154_H */ -- cgit v1.2.3-59-g8ed1b From d8ae3c33599ac5c7bc758d88529debbbe8c09265 Mon Sep 17 00:00:00 2001 From: Anish Bhatt Date: Tue, 4 Nov 2014 15:19:22 -0800 Subject: cxgbi: add maintainer for cxgb3i/cxgb4i Signed-off-by: Anish Bhatt Signed-off-by: Christoph Hellwig --- MAINTAINERS | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ea4d0058fd1b..84d9c5dea6bc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2744,6 +2744,13 @@ W: http://www.chelsio.com S: Supported F: drivers/net/ethernet/chelsio/cxgb3/ +CXGB3 ISCSI DRIVER (CXGB3I) +M: Karen Xie +L: linux-scsi@vger.kernel.org +W: http://www.chelsio.com +S: Supported +F: drivers/scsi/cxgbi/cxgb3i + CXGB3 IWARP RNIC DRIVER (IW_CXGB3) M: Steve Wise L: linux-rdma@vger.kernel.org @@ -2758,6 +2765,13 @@ W: http://www.chelsio.com S: Supported F: drivers/net/ethernet/chelsio/cxgb4/ +CXGB4 ISCSI DRIVER (CXGB4I) +M: Karen Xie +L: linux-scsi@vger.kernel.org +W: http://www.chelsio.com +S: Supported +F: drivers/scsi/cxgbi/cxgb4i + CXGB4 IWARP RNIC DRIVER (IW_CXGB4) M: Steve Wise L: linux-rdma@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From dc68cd11f5a8662e50135a0e8e5e451494febd47 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Thu, 6 Nov 2014 08:37:19 -0600 Subject: MAINTAINERS: add more files under OMAP SUPPORT These files are very important to the healt of the OMAP architecture, specially when it comes to PM support which currently we have working for at least OMAP3 and we'd like to know about any changes being made to our PMICs and IRQ controllers. 
Signed-off-by: Felipe Balbi Signed-off-by: Tony Lindgren --- MAINTAINERS | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dab92a78d1d5..45f8fac2a005 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6590,6 +6590,23 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git S: Maintained F: arch/arm/*omap*/ F: drivers/i2c/busses/i2c-omap.c +F: drivers/irqchip/irq-omap-intc.c +F: drivers/mfd/*omap*.c +F: drivers/mfd/menelaus.c +F: drivers/mfd/palmas.c +F: drivers/mfd/tps65217.c +F: drivers/mfd/tps65218.c +F: drivers/mfd/tps65910.c +F: drivers/mfd/twl-core.[ch] +F: drivers/mfd/twl4030*.c +F: drivers/mfd/twl6030*.c +F: drivers/mfd/twl6040*.c +F: drivers/regulator/palmas-regulator*.c +F: drivers/regulator/pbias-regulator.c +F: drivers/regulator/tps65217-regulator.c +F: drivers/regulator/tps65218-regulator.c +F: drivers/regulator/tps65910-regulator.c +F: drivers/regulator/twl-regulator.c F: include/linux/i2c-omap.h OMAP DEVICE TREE SUPPORT -- cgit v1.2.3-59-g8ed1b From 05eb20fa69dfdf9551a61397f16a41126ad43c55 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Tue, 21 Oct 2014 09:24:22 -0500 Subject: MAINTAINERS: Update entry for omap related .dts files to cover new SoCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The DRA7 (including AM5x) and AM47x series are handled under the OMAP umbrella. Support and dts files for these SoCs have been added since the 3.14 kernel, and pull requests for them have come in via OMAP to date. So just ensure that get_maintainer can pick up this list as well. Cc: linux-omap@vger.kernel.org Cc: devicetree@vger.kernel.org Signed-off-by: Nishanth Menon Acked-by: Benoît Cousson Signed-off-by: Tony Lindgren --- MAINTAINERS | 3 +++ 1 file changed, 3 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 45f8fac2a005..d0e1d2fc985c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6617,6 +6617,9 @@ L: devicetree@vger.kernel.org S: Maintained F: arch/arm/boot/dts/*omap* F: arch/arm/boot/dts/*am3* +F: arch/arm/boot/dts/*am4* +F: arch/arm/boot/dts/*am5* +F: arch/arm/boot/dts/*dra7* OMAP CLOCK FRAMEWORK SUPPORT M: Paul Walmsley -- cgit v1.2.3-59-g8ed1b From 36c0237f1ae08f9ba538ade0ab6375bd58bb996c Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Fri, 12 Sep 2014 16:50:56 -0700 Subject: MAINTAINERS: Entry for Cygnus/iproc arm architecture Acked-by: Jonathan Richardson Signed-off-by: Scott Branden Signed-off-by: Florian Fainelli --- MAINTAINERS | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..f946b0268ee2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2122,6 +2122,20 @@ L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/bnx2i/ +BROADCOM CYGNUS/IPROC ARM ARCHITECTURE +M: Ray Jui +M: Scott Branden +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: bcm-kernel-feedback-list@broadcom.com +T: git git://git.github.com/brcm/linux.git +S: Maintained +N: iproc +N: cygnus +N: bcm9113* +N: bcm9583* +N: bcm583* +N: bcm113* + BROADCOM KONA GPIO DRIVER M: Ray Jui L: bcm-kernel-feedback-list@broadcom.com -- cgit v1.2.3-59-g8ed1b From f956165f07839a0643e4e01081787190692a7c29 Mon Sep 17 00:00:00 2001 From: Michael Turquette Date: Wed, 12 Nov 2014 16:43:47 -0800 Subject: MAINTAINERS: add Stephen Boyd as clk co-maintainer Signed-off-by: Michael Turquette --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+), (limited to
'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ea4d0058fd1b..46d652f166ab 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2491,6 +2491,7 @@ F: include/uapi/linux/coda*.h COMMON CLK FRAMEWORK M: Mike Turquette +M: Stephen Boyd L: linux-kernel@vger.kernel.org T: git git://git.linaro.org/people/mturquette/linux.git S: Maintained -- cgit v1.2.3-59-g8ed1b From f0a0a58e6f46c2dded813ee860b9cbd795b4e571 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Fri, 7 Nov 2014 21:58:21 +0100 Subject: ARM: at91: move sdramc/ddrsdr header to include/soc/at91 Move the (DDR) SDRAM controller headers to include/soc/at91 to remove the dependency on mach/ headers from the at91-reset driver. Signed-off-by: Alexandre Belloni Signed-off-by: Nicolas Ferre --- MAINTAINERS | 1 + arch/arm/mach-at91/include/mach/at91_ramc.h | 6 +- .../arm/mach-at91/include/mach/at91rm9200_sdramc.h | 63 ----------- arch/arm/mach-at91/include/mach/at91sam9_ddrsdr.h | 124 --------------------- arch/arm/mach-at91/include/mach/at91sam9_sdramc.h | 85 -------------- arch/arm/mach-at91/pm.h | 1 - drivers/power/reset/at91-reset.c | 4 +- include/soc/at91/at91rm9200_sdramc.h | 63 +++++++++++ include/soc/at91/at91sam9_ddrsdr.h | 124 +++++++++++++++++++++ include/soc/at91/at91sam9_sdramc.h | 85 ++++++++++++++ 10 files changed, 278 insertions(+), 278 deletions(-) delete mode 100644 arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h delete mode 100644 arch/arm/mach-at91/include/mach/at91sam9_ddrsdr.h delete mode 100644 arch/arm/mach-at91/include/mach/at91sam9_sdramc.h create mode 100644 include/soc/at91/at91rm9200_sdramc.h create mode 100644 include/soc/at91/at91sam9_ddrsdr.h create mode 100644 include/soc/at91/at91sam9_sdramc.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dab92a78d1d5..9b604e7cc869 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -861,6 +861,7 @@ W: http://maxim.org.za/at91_26.html W: http://www.linux4sam.org S: Supported F: arch/arm/mach-at91/ +F: include/soc/at91/ F: arch/arm/boot/dts/at91*.dts F: arch/arm/boot/dts/at91*.dtsi F: arch/arm/boot/dts/sama*.dts diff --git a/arch/arm/mach-at91/include/mach/at91_ramc.h b/arch/arm/mach-at91/include/mach/at91_ramc.h index d8aeb278614e..e4492b151fee 100644 --- a/arch/arm/mach-at91/include/mach/at91_ramc.h +++ b/arch/arm/mach-at91/include/mach/at91_ramc.h @@ -25,8 +25,8 @@ extern void __iomem *at91_ramc_base[]; #define AT91_MEMCTRL_SDRAMC 1 #define AT91_MEMCTRL_DDRSDR 2 -#include -#include -#include +#include +#include +#include #endif /* __AT91_RAMC_H__ */ diff --git a/arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h b/arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h deleted file mode 100644 index aa047f458f1b..000000000000 --- a/arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h - * - * Copyright (C) 2005 Ivan Kokshaysky - * Copyright (C) SAN People - * - * Memory Controllers (SDRAMC only) - System peripherals registers. - * Based on AT91RM9200 datasheet revision E. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef AT91RM9200_SDRAMC_H -#define AT91RM9200_SDRAMC_H - -/* SDRAM Controller registers */ -#define AT91RM9200_SDRAMC_MR 0x90 /* Mode Register */ -#define AT91RM9200_SDRAMC_MODE (0xf << 0) /* Command Mode */ -#define AT91RM9200_SDRAMC_MODE_NORMAL (0 << 0) -#define AT91RM9200_SDRAMC_MODE_NOP (1 << 0) -#define AT91RM9200_SDRAMC_MODE_PRECHARGE (2 << 0) -#define AT91RM9200_SDRAMC_MODE_LMR (3 << 0) -#define AT91RM9200_SDRAMC_MODE_REFRESH (4 << 0) -#define AT91RM9200_SDRAMC_DBW (1 << 4) /* Data Bus Width */ -#define AT91RM9200_SDRAMC_DBW_32 (0 << 4) -#define AT91RM9200_SDRAMC_DBW_16 (1 << 4) - -#define AT91RM9200_SDRAMC_TR 0x94 /* Refresh Timer Register */ -#define AT91RM9200_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Count */ - -#define AT91RM9200_SDRAMC_CR 0x98 /* Configuration Register */ -#define AT91RM9200_SDRAMC_NC (3 << 0) /* Number of Column Bits */ -#define AT91RM9200_SDRAMC_NC_8 (0 << 0) -#define AT91RM9200_SDRAMC_NC_9 (1 << 0) -#define AT91RM9200_SDRAMC_NC_10 (2 << 0) -#define AT91RM9200_SDRAMC_NC_11 (3 << 0) -#define AT91RM9200_SDRAMC_NR (3 << 2) /* Number of Row Bits */ -#define AT91RM9200_SDRAMC_NR_11 (0 << 2) -#define AT91RM9200_SDRAMC_NR_12 (1 << 2) -#define AT91RM9200_SDRAMC_NR_13 (2 << 2) -#define AT91RM9200_SDRAMC_NB (1 << 4) /* Number of Banks */ -#define AT91RM9200_SDRAMC_NB_2 (0 << 4) -#define AT91RM9200_SDRAMC_NB_4 (1 << 4) -#define AT91RM9200_SDRAMC_CAS (3 << 5) /* CAS Latency */ -#define AT91RM9200_SDRAMC_CAS_2 (2 << 5) -#define AT91RM9200_SDRAMC_TWR (0xf << 7) /* Write Recovery Delay */ -#define AT91RM9200_SDRAMC_TRC (0xf << 11) /* Row Cycle Delay */ -#define AT91RM9200_SDRAMC_TRP (0xf << 15) /* Row Precharge Delay */ -#define AT91RM9200_SDRAMC_TRCD (0xf << 19) /* Row to Column Delay */ -#define AT91RM9200_SDRAMC_TRAS (0xf << 23) /* Active to Precharge Delay */ -#define AT91RM9200_SDRAMC_TXSR (0xf << 27) /* Exit Self Refresh to Active Delay */ - -#define AT91RM9200_SDRAMC_SRR 0x9c /* Self Refresh Register */ -#define AT91RM9200_SDRAMC_LPR 0xa0 /* Low Power Register */ -#define AT91RM9200_SDRAMC_IER 0xa4 /* Interrupt Enable Register */ -#define AT91RM9200_SDRAMC_IDR 0xa8 /* Interrupt Disable Register */ -#define AT91RM9200_SDRAMC_IMR 0xac /* Interrupt Mask Register */ -#define AT91RM9200_SDRAMC_ISR 0xb0 /* Interrupt Status Register */ - -#endif diff --git a/arch/arm/mach-at91/include/mach/at91sam9_ddrsdr.h b/arch/arm/mach-at91/include/mach/at91sam9_ddrsdr.h deleted file mode 100644 index 0210797abf2e..000000000000 --- a/arch/arm/mach-at91/include/mach/at91sam9_ddrsdr.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Header file for the Atmel DDR/SDR SDRAM Controller - * - * Copyright (C) 2010 Atmel Corporation - * Nicolas Ferre - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ -#ifndef AT91SAM9_DDRSDR_H -#define AT91SAM9_DDRSDR_H - -#define AT91_DDRSDRC_MR 0x00 /* Mode Register */ -#define AT91_DDRSDRC_MODE (0x7 << 0) /* Command Mode */ -#define AT91_DDRSDRC_MODE_NORMAL 0 -#define AT91_DDRSDRC_MODE_NOP 1 -#define AT91_DDRSDRC_MODE_PRECHARGE 2 -#define AT91_DDRSDRC_MODE_LMR 3 -#define AT91_DDRSDRC_MODE_REFRESH 4 -#define AT91_DDRSDRC_MODE_EXT_LMR 5 -#define AT91_DDRSDRC_MODE_DEEP 6 - -#define AT91_DDRSDRC_RTR 0x04 /* Refresh Timer Register */ -#define AT91_DDRSDRC_COUNT (0xfff << 0) /* Refresh Timer Counter */ - -#define AT91_DDRSDRC_CR 0x08 /* Configuration Register */ -#define AT91_DDRSDRC_NC (3 << 0) /* Number of Column Bits */ -#define AT91_DDRSDRC_NC_SDR8 (0 << 0) -#define AT91_DDRSDRC_NC_SDR9 (1 << 0) -#define AT91_DDRSDRC_NC_SDR10 (2 << 0) -#define AT91_DDRSDRC_NC_SDR11 (3 << 0) -#define AT91_DDRSDRC_NC_DDR9 (0 << 0) -#define AT91_DDRSDRC_NC_DDR10 (1 << 0) -#define AT91_DDRSDRC_NC_DDR11 (2 << 0) -#define AT91_DDRSDRC_NC_DDR12 (3 << 0) -#define AT91_DDRSDRC_NR (3 << 2) /* Number of Row Bits */ -#define AT91_DDRSDRC_NR_11 (0 << 2) -#define AT91_DDRSDRC_NR_12 (1 << 2) -#define AT91_DDRSDRC_NR_13 (2 << 2) -#define AT91_DDRSDRC_NR_14 (3 << 2) -#define AT91_DDRSDRC_CAS (7 << 4) /* CAS Latency */ -#define AT91_DDRSDRC_CAS_2 (2 << 4) -#define AT91_DDRSDRC_CAS_3 (3 << 4) -#define AT91_DDRSDRC_CAS_25 (6 << 4) -#define AT91_DDRSDRC_RST_DLL (1 << 7) /* Reset DLL */ -#define AT91_DDRSDRC_DICDS (1 << 8) /* Output impedance control */ -#define AT91_DDRSDRC_DIS_DLL (1 << 9) /* Disable DLL [SAM9 Only] */ -#define AT91_DDRSDRC_OCD (1 << 12) /* Off-Chip Driver [SAM9 Only] */ -#define AT91_DDRSDRC_DQMS (1 << 16) /* Mask Data is Shared [SAM9 Only] */ -#define AT91_DDRSDRC_ACTBST (1 << 18) /* Active Bank X to Burst Stop Read Access Bank Y [SAM9 Only] */ - -#define AT91_DDRSDRC_T0PR 0x0C /* Timing 0 Register */ -#define AT91_DDRSDRC_TRAS (0xf << 0) /* Active to Precharge delay */ -#define AT91_DDRSDRC_TRCD (0xf << 4) /* Row to Column delay */ -#define AT91_DDRSDRC_TWR (0xf << 8) /* Write recovery delay */ -#define AT91_DDRSDRC_TRC (0xf << 12) /* Row cycle delay */ -#define AT91_DDRSDRC_TRP (0xf << 16) /* Row precharge delay */ -#define AT91_DDRSDRC_TRRD (0xf << 20) /* Active BankA to BankB */ -#define AT91_DDRSDRC_TWTR (0x7 << 24) /* Internal Write to Read delay */ -#define AT91_DDRSDRC_RED_WRRD (0x1 << 27) /* Reduce Write to Read Delay [SAM9 Only] */ -#define AT91_DDRSDRC_TMRD (0xf << 28) /* Load mode to active/refresh delay */ - -#define AT91_DDRSDRC_T1PR 0x10 /* Timing 1 Register */ -#define AT91_DDRSDRC_TRFC (0x1f << 0) /* Row Cycle Delay */ -#define AT91_DDRSDRC_TXSNR (0xff << 8) /* Exit self-refresh to non-read */ -#define AT91_DDRSDRC_TXSRD (0xff << 16) /* Exit self-refresh to read */ -#define AT91_DDRSDRC_TXP (0xf << 24) /* Exit power-down delay */ - -#define AT91_DDRSDRC_T2PR 0x14 /* Timing 2 Register [SAM9 Only] */ -#define AT91_DDRSDRC_TXARD (0xf << 0) /* Exit active power down delay to read command in mode "Fast Exit" */ -#define AT91_DDRSDRC_TXARDS (0xf << 4) /* Exit active power down delay to read command in mode "Slow Exit" */ -#define AT91_DDRSDRC_TRPA (0xf << 8) /* Row Precharge All delay */ -#define AT91_DDRSDRC_TRTP (0x7 << 12) /* Read to Precharge delay */ - -#define AT91_DDRSDRC_LPR 0x1C /* Low Power Register */ -#define AT91_DDRSDRC_LPCB (3 << 0) /* Low-power Configurations */ -#define AT91_DDRSDRC_LPCB_DISABLE 0 -#define AT91_DDRSDRC_LPCB_SELF_REFRESH 1 -#define AT91_DDRSDRC_LPCB_POWER_DOWN 2 -#define AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN 3 -#define 
AT91_DDRSDRC_CLKFR (1 << 2) /* Clock Frozen */ -#define AT91_DDRSDRC_PASR (7 << 4) /* Partial Array Self Refresh */ -#define AT91_DDRSDRC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */ -#define AT91_DDRSDRC_DS (3 << 10) /* Drive Strength */ -#define AT91_DDRSDRC_TIMEOUT (3 << 12) /* Time to define when Low Power Mode is enabled */ -#define AT91_DDRSDRC_TIMEOUT_0_CLK_CYCLES (0 << 12) -#define AT91_DDRSDRC_TIMEOUT_64_CLK_CYCLES (1 << 12) -#define AT91_DDRSDRC_TIMEOUT_128_CLK_CYCLES (2 << 12) -#define AT91_DDRSDRC_APDE (1 << 16) /* Active power down exit time */ -#define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ - -#define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ -#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ -#define AT91_DDRSDRC_MD_SDR 0 -#define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 -#define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 -#define AT91_DDRSDRC_MD_DDR2 6 /* [SAM9 Only] */ -#define AT91_DDRSDRC_DBW (1 << 4) /* Data Bus Width */ -#define AT91_DDRSDRC_DBW_32BITS (0 << 4) -#define AT91_DDRSDRC_DBW_16BITS (1 << 4) - -#define AT91_DDRSDRC_DLL 0x24 /* DLL Information Register */ -#define AT91_DDRSDRC_MDINC (1 << 0) /* Master Delay increment */ -#define AT91_DDRSDRC_MDDEC (1 << 1) /* Master Delay decrement */ -#define AT91_DDRSDRC_MDOVF (1 << 2) /* Master Delay Overflow */ -#define AT91_DDRSDRC_MDVAL (0xff << 8) /* Master Delay value */ - -#define AT91_DDRSDRC_HS 0x2C /* High Speed Register [SAM9 Only] */ -#define AT91_DDRSDRC_DIS_ATCP_RD (1 << 2) /* Anticip read access is disabled */ - -#define AT91_DDRSDRC_DELAY(n) (0x30 + (0x4 * (n))) /* Delay I/O Register n */ - -#define AT91_DDRSDRC_WPMR 0xE4 /* Write Protect Mode Register [SAM9 Only] */ -#define AT91_DDRSDRC_WP (1 << 0) /* Write protect enable */ -#define AT91_DDRSDRC_WPKEY (0xffffff << 8) /* Write protect key */ -#define AT91_DDRSDRC_KEY (0x444452 << 8) /* Write protect key = "DDR" */ - -#define AT91_DDRSDRC_WPSR 0xE8 /* Write Protect Status Register [SAM9 Only] */ -#define AT91_DDRSDRC_WPVS (1 << 0) /* Write protect violation status */ -#define AT91_DDRSDRC_WPVSRC (0xffff << 8) /* Write protect violation source */ - -#endif diff --git a/arch/arm/mach-at91/include/mach/at91sam9_sdramc.h b/arch/arm/mach-at91/include/mach/at91sam9_sdramc.h deleted file mode 100644 index 3d085a9a7450..000000000000 --- a/arch/arm/mach-at91/include/mach/at91sam9_sdramc.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * arch/arm/mach-at91/include/mach/at91sam9_sdramc.h - * - * Copyright (C) 2007 Andrew Victor - * Copyright (C) 2007 Atmel Corporation. - * - * SDRAM Controllers (SDRAMC) - System peripherals registers. - * Based on AT91SAM9261 datasheet revision D. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef AT91SAM9_SDRAMC_H -#define AT91SAM9_SDRAMC_H - -/* SDRAM Controller (SDRAMC) registers */ -#define AT91_SDRAMC_MR 0x00 /* SDRAM Controller Mode Register */ -#define AT91_SDRAMC_MODE (0xf << 0) /* Command Mode */ -#define AT91_SDRAMC_MODE_NORMAL 0 -#define AT91_SDRAMC_MODE_NOP 1 -#define AT91_SDRAMC_MODE_PRECHARGE 2 -#define AT91_SDRAMC_MODE_LMR 3 -#define AT91_SDRAMC_MODE_REFRESH 4 -#define AT91_SDRAMC_MODE_EXT_LMR 5 -#define AT91_SDRAMC_MODE_DEEP 6 - -#define AT91_SDRAMC_TR 0x04 /* SDRAM Controller Refresh Timer Register */ -#define AT91_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Counter */ - -#define AT91_SDRAMC_CR 0x08 /* SDRAM Controller Configuration Register */ -#define AT91_SDRAMC_NC (3 << 0) /* Number of Column Bits */ -#define AT91_SDRAMC_NC_8 (0 << 0) -#define AT91_SDRAMC_NC_9 (1 << 0) -#define AT91_SDRAMC_NC_10 (2 << 0) -#define AT91_SDRAMC_NC_11 (3 << 0) -#define AT91_SDRAMC_NR (3 << 2) /* Number of Row Bits */ -#define AT91_SDRAMC_NR_11 (0 << 2) -#define AT91_SDRAMC_NR_12 (1 << 2) -#define AT91_SDRAMC_NR_13 (2 << 2) -#define AT91_SDRAMC_NB (1 << 4) /* Number of Banks */ -#define AT91_SDRAMC_NB_2 (0 << 4) -#define AT91_SDRAMC_NB_4 (1 << 4) -#define AT91_SDRAMC_CAS (3 << 5) /* CAS Latency */ -#define AT91_SDRAMC_CAS_1 (1 << 5) -#define AT91_SDRAMC_CAS_2 (2 << 5) -#define AT91_SDRAMC_CAS_3 (3 << 5) -#define AT91_SDRAMC_DBW (1 << 7) /* Data Bus Width */ -#define AT91_SDRAMC_DBW_32 (0 << 7) -#define AT91_SDRAMC_DBW_16 (1 << 7) -#define AT91_SDRAMC_TWR (0xf << 8) /* Write Recovery Delay */ -#define AT91_SDRAMC_TRC (0xf << 12) /* Row Cycle Delay */ -#define AT91_SDRAMC_TRP (0xf << 16) /* Row Precharge Delay */ -#define AT91_SDRAMC_TRCD (0xf << 20) /* Row to Column Delay */ -#define AT91_SDRAMC_TRAS (0xf << 24) /* Active to Precharge Delay */ -#define AT91_SDRAMC_TXSR (0xf << 28) /* Exit Self Refresh to Active Delay */ - -#define AT91_SDRAMC_LPR 0x10 /* SDRAM Controller Low Power Register */ -#define AT91_SDRAMC_LPCB (3 << 0) /* Low-power Configurations */ -#define AT91_SDRAMC_LPCB_DISABLE 0 -#define AT91_SDRAMC_LPCB_SELF_REFRESH 1 -#define AT91_SDRAMC_LPCB_POWER_DOWN 2 -#define AT91_SDRAMC_LPCB_DEEP_POWER_DOWN 3 -#define AT91_SDRAMC_PASR (7 << 4) /* Partial Array Self Refresh */ -#define AT91_SDRAMC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */ -#define AT91_SDRAMC_DS (3 << 10) /* Drive Strength */ -#define AT91_SDRAMC_TIMEOUT (3 << 12) /* Time to define when Low Power Mode is enabled */ -#define AT91_SDRAMC_TIMEOUT_0_CLK_CYCLES (0 << 12) -#define AT91_SDRAMC_TIMEOUT_64_CLK_CYCLES (1 << 12) -#define AT91_SDRAMC_TIMEOUT_128_CLK_CYCLES (2 << 12) - -#define AT91_SDRAMC_IER 0x14 /* SDRAM Controller Interrupt Enable Register */ -#define AT91_SDRAMC_IDR 0x18 /* SDRAM Controller Interrupt Disable Register */ -#define AT91_SDRAMC_IMR 0x1C /* SDRAM Controller Interrupt Mask Register */ -#define AT91_SDRAMC_ISR 0x20 /* SDRAM Controller Interrupt Status Register */ -#define AT91_SDRAMC_RES (1 << 0) /* Refresh Error Status */ - -#define AT91_SDRAMC_MDR 0x24 /* SDRAM Memory Device Register */ -#define AT91_SDRAMC_MD (3 << 0) /* Memory Device Type */ -#define AT91_SDRAMC_MD_SDRAM 0 -#define AT91_SDRAMC_MD_LOW_POWER_SDRAM 1 - -#endif diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index c5101dcb4fb0..d2c89963af2d 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h @@ -14,7 +14,6 @@ #include #include -#include #ifdef CONFIG_PM extern void at91_pm_set_standby(void (*at91_standby)(void)); diff --git a/drivers/power/reset/at91-reset.c 
b/drivers/power/reset/at91-reset.c index 3cb36693343a..69a75d99ae92 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -19,8 +19,8 @@ #include -#include -#include +#include +#include #define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */ #define AT91_RSTC_PROCRST BIT(0) /* Processor Reset */ diff --git a/include/soc/at91/at91rm9200_sdramc.h b/include/soc/at91/at91rm9200_sdramc.h new file mode 100644 index 000000000000..aa047f458f1b --- /dev/null +++ b/include/soc/at91/at91rm9200_sdramc.h @@ -0,0 +1,63 @@ +/* + * arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * Memory Controllers (SDRAMC only) - System peripherals registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef AT91RM9200_SDRAMC_H +#define AT91RM9200_SDRAMC_H + +/* SDRAM Controller registers */ +#define AT91RM9200_SDRAMC_MR 0x90 /* Mode Register */ +#define AT91RM9200_SDRAMC_MODE (0xf << 0) /* Command Mode */ +#define AT91RM9200_SDRAMC_MODE_NORMAL (0 << 0) +#define AT91RM9200_SDRAMC_MODE_NOP (1 << 0) +#define AT91RM9200_SDRAMC_MODE_PRECHARGE (2 << 0) +#define AT91RM9200_SDRAMC_MODE_LMR (3 << 0) +#define AT91RM9200_SDRAMC_MODE_REFRESH (4 << 0) +#define AT91RM9200_SDRAMC_DBW (1 << 4) /* Data Bus Width */ +#define AT91RM9200_SDRAMC_DBW_32 (0 << 4) +#define AT91RM9200_SDRAMC_DBW_16 (1 << 4) + +#define AT91RM9200_SDRAMC_TR 0x94 /* Refresh Timer Register */ +#define AT91RM9200_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Count */ + +#define AT91RM9200_SDRAMC_CR 0x98 /* Configuration Register */ +#define AT91RM9200_SDRAMC_NC (3 << 0) /* Number of Column Bits */ +#define AT91RM9200_SDRAMC_NC_8 (0 << 0) +#define AT91RM9200_SDRAMC_NC_9 (1 << 0) +#define AT91RM9200_SDRAMC_NC_10 (2 << 0) +#define AT91RM9200_SDRAMC_NC_11 (3 << 0) +#define AT91RM9200_SDRAMC_NR (3 << 2) /* Number of Row Bits */ +#define AT91RM9200_SDRAMC_NR_11 (0 << 2) +#define AT91RM9200_SDRAMC_NR_12 (1 << 2) +#define AT91RM9200_SDRAMC_NR_13 (2 << 2) +#define AT91RM9200_SDRAMC_NB (1 << 4) /* Number of Banks */ +#define AT91RM9200_SDRAMC_NB_2 (0 << 4) +#define AT91RM9200_SDRAMC_NB_4 (1 << 4) +#define AT91RM9200_SDRAMC_CAS (3 << 5) /* CAS Latency */ +#define AT91RM9200_SDRAMC_CAS_2 (2 << 5) +#define AT91RM9200_SDRAMC_TWR (0xf << 7) /* Write Recovery Delay */ +#define AT91RM9200_SDRAMC_TRC (0xf << 11) /* Row Cycle Delay */ +#define AT91RM9200_SDRAMC_TRP (0xf << 15) /* Row Precharge Delay */ +#define AT91RM9200_SDRAMC_TRCD (0xf << 19) /* Row to Column Delay */ +#define AT91RM9200_SDRAMC_TRAS (0xf << 23) /* Active to Precharge Delay */ +#define AT91RM9200_SDRAMC_TXSR (0xf << 27) /* Exit Self Refresh to Active Delay */ + +#define AT91RM9200_SDRAMC_SRR 0x9c /* Self Refresh Register */ +#define AT91RM9200_SDRAMC_LPR 0xa0 /* Low Power Register */ +#define AT91RM9200_SDRAMC_IER 0xa4 /* Interrupt Enable Register */ +#define AT91RM9200_SDRAMC_IDR 0xa8 /* Interrupt Disable Register */ +#define AT91RM9200_SDRAMC_IMR 0xac /* Interrupt Mask Register */ +#define AT91RM9200_SDRAMC_ISR 0xb0 /* Interrupt Status Register */ + +#endif diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h new file mode 100644 index 000000000000..0210797abf2e --- /dev/null +++ 
b/include/soc/at91/at91sam9_ddrsdr.h @@ -0,0 +1,124 @@ +/* + * Header file for the Atmel DDR/SDR SDRAM Controller + * + * Copyright (C) 2010 Atmel Corporation + * Nicolas Ferre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef AT91SAM9_DDRSDR_H +#define AT91SAM9_DDRSDR_H + +#define AT91_DDRSDRC_MR 0x00 /* Mode Register */ +#define AT91_DDRSDRC_MODE (0x7 << 0) /* Command Mode */ +#define AT91_DDRSDRC_MODE_NORMAL 0 +#define AT91_DDRSDRC_MODE_NOP 1 +#define AT91_DDRSDRC_MODE_PRECHARGE 2 +#define AT91_DDRSDRC_MODE_LMR 3 +#define AT91_DDRSDRC_MODE_REFRESH 4 +#define AT91_DDRSDRC_MODE_EXT_LMR 5 +#define AT91_DDRSDRC_MODE_DEEP 6 + +#define AT91_DDRSDRC_RTR 0x04 /* Refresh Timer Register */ +#define AT91_DDRSDRC_COUNT (0xfff << 0) /* Refresh Timer Counter */ + +#define AT91_DDRSDRC_CR 0x08 /* Configuration Register */ +#define AT91_DDRSDRC_NC (3 << 0) /* Number of Column Bits */ +#define AT91_DDRSDRC_NC_SDR8 (0 << 0) +#define AT91_DDRSDRC_NC_SDR9 (1 << 0) +#define AT91_DDRSDRC_NC_SDR10 (2 << 0) +#define AT91_DDRSDRC_NC_SDR11 (3 << 0) +#define AT91_DDRSDRC_NC_DDR9 (0 << 0) +#define AT91_DDRSDRC_NC_DDR10 (1 << 0) +#define AT91_DDRSDRC_NC_DDR11 (2 << 0) +#define AT91_DDRSDRC_NC_DDR12 (3 << 0) +#define AT91_DDRSDRC_NR (3 << 2) /* Number of Row Bits */ +#define AT91_DDRSDRC_NR_11 (0 << 2) +#define AT91_DDRSDRC_NR_12 (1 << 2) +#define AT91_DDRSDRC_NR_13 (2 << 2) +#define AT91_DDRSDRC_NR_14 (3 << 2) +#define AT91_DDRSDRC_CAS (7 << 4) /* CAS Latency */ +#define AT91_DDRSDRC_CAS_2 (2 << 4) +#define AT91_DDRSDRC_CAS_3 (3 << 4) +#define AT91_DDRSDRC_CAS_25 (6 << 4) +#define AT91_DDRSDRC_RST_DLL (1 << 7) /* Reset DLL */ +#define AT91_DDRSDRC_DICDS (1 << 8) /* Output impedance control */ +#define AT91_DDRSDRC_DIS_DLL (1 << 9) /* Disable DLL [SAM9 Only] */ +#define AT91_DDRSDRC_OCD (1 << 12) /* Off-Chip Driver [SAM9 Only] */ +#define AT91_DDRSDRC_DQMS (1 << 16) /* Mask Data is Shared [SAM9 Only] */ +#define AT91_DDRSDRC_ACTBST (1 << 18) /* Active Bank X to Burst Stop Read Access Bank Y [SAM9 Only] */ + +#define AT91_DDRSDRC_T0PR 0x0C /* Timing 0 Register */ +#define AT91_DDRSDRC_TRAS (0xf << 0) /* Active to Precharge delay */ +#define AT91_DDRSDRC_TRCD (0xf << 4) /* Row to Column delay */ +#define AT91_DDRSDRC_TWR (0xf << 8) /* Write recovery delay */ +#define AT91_DDRSDRC_TRC (0xf << 12) /* Row cycle delay */ +#define AT91_DDRSDRC_TRP (0xf << 16) /* Row precharge delay */ +#define AT91_DDRSDRC_TRRD (0xf << 20) /* Active BankA to BankB */ +#define AT91_DDRSDRC_TWTR (0x7 << 24) /* Internal Write to Read delay */ +#define AT91_DDRSDRC_RED_WRRD (0x1 << 27) /* Reduce Write to Read Delay [SAM9 Only] */ +#define AT91_DDRSDRC_TMRD (0xf << 28) /* Load mode to active/refresh delay */ + +#define AT91_DDRSDRC_T1PR 0x10 /* Timing 1 Register */ +#define AT91_DDRSDRC_TRFC (0x1f << 0) /* Row Cycle Delay */ +#define AT91_DDRSDRC_TXSNR (0xff << 8) /* Exit self-refresh to non-read */ +#define AT91_DDRSDRC_TXSRD (0xff << 16) /* Exit self-refresh to read */ +#define AT91_DDRSDRC_TXP (0xf << 24) /* Exit power-down delay */ + +#define AT91_DDRSDRC_T2PR 0x14 /* Timing 2 Register [SAM9 Only] */ +#define AT91_DDRSDRC_TXARD (0xf << 0) /* Exit active power down delay to read command in mode "Fast Exit" */ +#define AT91_DDRSDRC_TXARDS (0xf << 4) /* Exit active power down delay to read command in mode "Slow Exit" */ 
+#define AT91_DDRSDRC_TRPA (0xf << 8) /* Row Precharge All delay */ +#define AT91_DDRSDRC_TRTP (0x7 << 12) /* Read to Precharge delay */ + +#define AT91_DDRSDRC_LPR 0x1C /* Low Power Register */ +#define AT91_DDRSDRC_LPCB (3 << 0) /* Low-power Configurations */ +#define AT91_DDRSDRC_LPCB_DISABLE 0 +#define AT91_DDRSDRC_LPCB_SELF_REFRESH 1 +#define AT91_DDRSDRC_LPCB_POWER_DOWN 2 +#define AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN 3 +#define AT91_DDRSDRC_CLKFR (1 << 2) /* Clock Frozen */ +#define AT91_DDRSDRC_PASR (7 << 4) /* Partial Array Self Refresh */ +#define AT91_DDRSDRC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */ +#define AT91_DDRSDRC_DS (3 << 10) /* Drive Strength */ +#define AT91_DDRSDRC_TIMEOUT (3 << 12) /* Time to define when Low Power Mode is enabled */ +#define AT91_DDRSDRC_TIMEOUT_0_CLK_CYCLES (0 << 12) +#define AT91_DDRSDRC_TIMEOUT_64_CLK_CYCLES (1 << 12) +#define AT91_DDRSDRC_TIMEOUT_128_CLK_CYCLES (2 << 12) +#define AT91_DDRSDRC_APDE (1 << 16) /* Active power down exit time */ +#define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ + +#define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ +#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ +#define AT91_DDRSDRC_MD_SDR 0 +#define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 +#define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 +#define AT91_DDRSDRC_MD_DDR2 6 /* [SAM9 Only] */ +#define AT91_DDRSDRC_DBW (1 << 4) /* Data Bus Width */ +#define AT91_DDRSDRC_DBW_32BITS (0 << 4) +#define AT91_DDRSDRC_DBW_16BITS (1 << 4) + +#define AT91_DDRSDRC_DLL 0x24 /* DLL Information Register */ +#define AT91_DDRSDRC_MDINC (1 << 0) /* Master Delay increment */ +#define AT91_DDRSDRC_MDDEC (1 << 1) /* Master Delay decrement */ +#define AT91_DDRSDRC_MDOVF (1 << 2) /* Master Delay Overflow */ +#define AT91_DDRSDRC_MDVAL (0xff << 8) /* Master Delay value */ + +#define AT91_DDRSDRC_HS 0x2C /* High Speed Register [SAM9 Only] */ +#define AT91_DDRSDRC_DIS_ATCP_RD (1 << 2) /* Anticip read access is disabled */ + +#define AT91_DDRSDRC_DELAY(n) (0x30 + (0x4 * (n))) /* Delay I/O Register n */ + +#define AT91_DDRSDRC_WPMR 0xE4 /* Write Protect Mode Register [SAM9 Only] */ +#define AT91_DDRSDRC_WP (1 << 0) /* Write protect enable */ +#define AT91_DDRSDRC_WPKEY (0xffffff << 8) /* Write protect key */ +#define AT91_DDRSDRC_KEY (0x444452 << 8) /* Write protect key = "DDR" */ + +#define AT91_DDRSDRC_WPSR 0xE8 /* Write Protect Status Register [SAM9 Only] */ +#define AT91_DDRSDRC_WPVS (1 << 0) /* Write protect violation status */ +#define AT91_DDRSDRC_WPVSRC (0xffff << 8) /* Write protect violation source */ + +#endif diff --git a/include/soc/at91/at91sam9_sdramc.h b/include/soc/at91/at91sam9_sdramc.h new file mode 100644 index 000000000000..3d085a9a7450 --- /dev/null +++ b/include/soc/at91/at91sam9_sdramc.h @@ -0,0 +1,85 @@ +/* + * arch/arm/mach-at91/include/mach/at91sam9_sdramc.h + * + * Copyright (C) 2007 Andrew Victor + * Copyright (C) 2007 Atmel Corporation. + * + * SDRAM Controllers (SDRAMC) - System peripherals registers. + * Based on AT91SAM9261 datasheet revision D. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef AT91SAM9_SDRAMC_H +#define AT91SAM9_SDRAMC_H + +/* SDRAM Controller (SDRAMC) registers */ +#define AT91_SDRAMC_MR 0x00 /* SDRAM Controller Mode Register */ +#define AT91_SDRAMC_MODE (0xf << 0) /* Command Mode */ +#define AT91_SDRAMC_MODE_NORMAL 0 +#define AT91_SDRAMC_MODE_NOP 1 +#define AT91_SDRAMC_MODE_PRECHARGE 2 +#define AT91_SDRAMC_MODE_LMR 3 +#define AT91_SDRAMC_MODE_REFRESH 4 +#define AT91_SDRAMC_MODE_EXT_LMR 5 +#define AT91_SDRAMC_MODE_DEEP 6 + +#define AT91_SDRAMC_TR 0x04 /* SDRAM Controller Refresh Timer Register */ +#define AT91_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Counter */ + +#define AT91_SDRAMC_CR 0x08 /* SDRAM Controller Configuration Register */ +#define AT91_SDRAMC_NC (3 << 0) /* Number of Column Bits */ +#define AT91_SDRAMC_NC_8 (0 << 0) +#define AT91_SDRAMC_NC_9 (1 << 0) +#define AT91_SDRAMC_NC_10 (2 << 0) +#define AT91_SDRAMC_NC_11 (3 << 0) +#define AT91_SDRAMC_NR (3 << 2) /* Number of Row Bits */ +#define AT91_SDRAMC_NR_11 (0 << 2) +#define AT91_SDRAMC_NR_12 (1 << 2) +#define AT91_SDRAMC_NR_13 (2 << 2) +#define AT91_SDRAMC_NB (1 << 4) /* Number of Banks */ +#define AT91_SDRAMC_NB_2 (0 << 4) +#define AT91_SDRAMC_NB_4 (1 << 4) +#define AT91_SDRAMC_CAS (3 << 5) /* CAS Latency */ +#define AT91_SDRAMC_CAS_1 (1 << 5) +#define AT91_SDRAMC_CAS_2 (2 << 5) +#define AT91_SDRAMC_CAS_3 (3 << 5) +#define AT91_SDRAMC_DBW (1 << 7) /* Data Bus Width */ +#define AT91_SDRAMC_DBW_32 (0 << 7) +#define AT91_SDRAMC_DBW_16 (1 << 7) +#define AT91_SDRAMC_TWR (0xf << 8) /* Write Recovery Delay */ +#define AT91_SDRAMC_TRC (0xf << 12) /* Row Cycle Delay */ +#define AT91_SDRAMC_TRP (0xf << 16) /* Row Precharge Delay */ +#define AT91_SDRAMC_TRCD (0xf << 20) /* Row to Column Delay */ +#define AT91_SDRAMC_TRAS (0xf << 24) /* Active to Precharge Delay */ +#define AT91_SDRAMC_TXSR (0xf << 28) /* Exit Self Refresh to Active Delay */ + +#define AT91_SDRAMC_LPR 0x10 /* SDRAM Controller Low Power Register */ +#define AT91_SDRAMC_LPCB (3 << 0) /* Low-power Configurations */ +#define AT91_SDRAMC_LPCB_DISABLE 0 +#define AT91_SDRAMC_LPCB_SELF_REFRESH 1 +#define AT91_SDRAMC_LPCB_POWER_DOWN 2 +#define AT91_SDRAMC_LPCB_DEEP_POWER_DOWN 3 +#define AT91_SDRAMC_PASR (7 << 4) /* Partial Array Self Refresh */ +#define AT91_SDRAMC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */ +#define AT91_SDRAMC_DS (3 << 10) /* Drive Strength */ +#define AT91_SDRAMC_TIMEOUT (3 << 12) /* Time to define when Low Power Mode is enabled */ +#define AT91_SDRAMC_TIMEOUT_0_CLK_CYCLES (0 << 12) +#define AT91_SDRAMC_TIMEOUT_64_CLK_CYCLES (1 << 12) +#define AT91_SDRAMC_TIMEOUT_128_CLK_CYCLES (2 << 12) + +#define AT91_SDRAMC_IER 0x14 /* SDRAM Controller Interrupt Enable Register */ +#define AT91_SDRAMC_IDR 0x18 /* SDRAM Controller Interrupt Disable Register */ +#define AT91_SDRAMC_IMR 0x1C /* SDRAM Controller Interrupt Mask Register */ +#define AT91_SDRAMC_ISR 0x20 /* SDRAM Controller Interrupt Status Register */ +#define AT91_SDRAMC_RES (1 << 0) /* Refresh Error Status */ + +#define AT91_SDRAMC_MDR 0x24 /* SDRAM Memory Device Register */ +#define AT91_SDRAMC_MD (3 << 0) /* Memory Device Type */ +#define AT91_SDRAMC_MD_SDRAM 0 +#define AT91_SDRAMC_MD_LOW_POWER_SDRAM 1 + +#endif -- cgit v1.2.3-59-g8ed1b From c47b15c4928c46b889a1ea2e9b54f4110514e1c3 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 13 Nov 2014 09:41:58 -0500 Subject: arch/tile: update MAINTAINERS email to EZchip EZchip Semiconductor closed the acquisition of Tilera Corp last week, and tilera.com email addresses are now ezchip.com. 
http://www.ezchip.com/pr_141106.htm Signed-off-by: Chris Metcalf --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3c6427190be2..81764acd163d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9362,7 +9362,7 @@ F: include/uapi/linux/tipc*.h F: net/tipc/ TILE ARCHITECTURE -M: Chris Metcalf +M: Chris Metcalf W: http://www.tilera.com/scm/ S: Supported F: arch/tile/ -- cgit v1.2.3-59-g8ed1b From 62d0ff83c6e2221662fa0dccf2de096bdc7a2fc4 Mon Sep 17 00:00:00 2001 From: Minghuan Lian Date: Wed, 5 Nov 2014 16:45:11 +0800 Subject: PCI: layerscape: Add Freescale Layerscape PCIe driver Add support for the Freescale Layerscape PCIe controller. This driver re-uses the Synopsys DesignWare core code. [bhelgaas: add Kconfig dependency on CONFIG_ARM] Signed-off-by: Minghuan Lian Signed-off-by: Bjorn Helgaas Acked-by: Arnd Bergmann --- .../devicetree/bindings/pci/layerscape-pci.txt | 42 +++++ MAINTAINERS | 10 ++ drivers/pci/host/Kconfig | 8 + drivers/pci/host/Makefile | 1 + drivers/pci/host/pci-layerscape.c | 179 +++++++++++++++++++++ 5 files changed, 240 insertions(+) create mode 100644 Documentation/devicetree/bindings/pci/layerscape-pci.txt create mode 100644 drivers/pci/host/pci-layerscape.c (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt new file mode 100644 index 000000000000..6286f049bf18 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt @@ -0,0 +1,42 @@ +Freescale Layerscape PCIe controller + +This PCIe host controller is based on the Synopsys DesignWare PCIe IP +and thus inherits all the common properties defined in designware-pcie.txt. + +Required properties: +- compatible: should contain the platform identifier such as "fsl,ls1021a-pcie" +- reg: base addresses and lengths of the PCIe controller +- interrupts: A list of interrupt outputs of the controller. Must contain an + entry for each entry in the interrupt-names property. +- interrupt-names: Must include the following entries: + "intr": The interrupt that is asserted for controller interrupts +- fsl,pcie-scfg: Must include two entries. + The first entry must be a link to the SCFG device node + The second entry must be '0' or '1' based on physical PCIe controller index.
+ This is used to get SCFG PEXN registers + +Example: + + pcie@3400000 { + compatible = "fsl,ls1021a-pcie", "snps,dw-pcie"; + reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */ + 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ + reg-names = "regs", "config"; + interrupts = ; /* controller interrupt */ + interrupt-names = "intr"; + fsl,pcie-scfg = <&scfg 0>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; + num-lanes = <4>; + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ + 0xc2000000 0x0 0x20000000 0x40 0x20000000 0x0 0x20000000 /* prefetchable memory */ + 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, + <0000 0 0 2 &gic GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, + <0000 0 0 3 &gic GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, + <0000 0 0 4 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>; + }; diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..016e1ef1c16c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6983,6 +6983,16 @@ S: Maintained F: Documentation/devicetree/bindings/pci/xgene-pci.txt F: drivers/pci/host/pci-xgene.c +PCI DRIVER FOR FREESCALE LAYERSCAPE +M: Minghuan Lian +M: Mingkai Hu +M: Roy Zang +L: linuxppc-dev@lists.ozlabs.org +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org +S: Maintained +F: drivers/pci/host/*layerscape* + PCI DRIVER FOR IMX6 M: Richard Zhu M: Lucas Stach diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 3dc25fad490c..67e2cc5a0bd4 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -91,4 +91,12 @@ config PCI_XGENE There are 5 internal PCIe ports available. Each port is GEN3 capable and have varied lanes from x1 to x8. +config PCI_LAYERSCAPE + bool "Freescale Layerscape PCIe controller" + depends on OF && ARM + select PCIE_DW + select MFD_SYSCON + help + Say Y here if you want PCIe controller support on Layerscape SoCs. + endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 26b3461d68d7..44c26998027f 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o obj-$(CONFIG_PCI_XGENE) += pci-xgene.o +obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c new file mode 100644 index 000000000000..6697b1a4d4fa --- /dev/null +++ b/drivers/pci/host/pci-layerscape.c @@ -0,0 +1,179 @@ +/* + * PCIe host controller driver for Freescale Layerscape SoCs + * + * Copyright (C) 2014 Freescale Semiconductor. + * + * Author: Minghuan Lian + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +/* PEX1/2 Misc Ports Status Register */ +#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) +#define LTSSM_STATE_SHIFT 20 +#define LTSSM_STATE_MASK 0x3f +#define LTSSM_PCIE_L0 0x11 /* L0 state */ + +/* Symbol Timer Register and Filter Mask Register 1 */ +#define PCIE_STRFMR1 0x71c + +struct ls_pcie { + struct list_head node; + struct device *dev; + struct pci_bus *bus; + void __iomem *dbi; + struct regmap *scfg; + struct pcie_port pp; + int index; + int msi_irq; +}; + +#define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) + +static int ls_pcie_link_up(struct pcie_port *pp) +{ + u32 state; + struct ls_pcie *pcie = to_ls_pcie(pp); + + regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); + state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; + + if (state < LTSSM_PCIE_L0) + return 0; + + return 1; +} + +static void ls_pcie_host_init(struct pcie_port *pp) +{ + struct ls_pcie *pcie = to_ls_pcie(pp); + int count = 0; + u32 val; + + dw_pcie_setup_rc(pp); + + while (!ls_pcie_link_up(pp)) { + usleep_range(100, 1000); + count++; + if (count >= 200) { + dev_err(pp->dev, "phy link never came up\n"); + return; + } + } + + /* + * LS1021A Workaround for internal TKT228622 + * to fix the INTx hang issue + */ + val = ioread32(pcie->dbi + PCIE_STRFMR1); + val &= 0xffff; + iowrite32(val, pcie->dbi + PCIE_STRFMR1); +} + +static struct pcie_host_ops ls_pcie_host_ops = { + .link_up = ls_pcie_link_up, + .host_init = ls_pcie_host_init, +}; + +static int ls_add_pcie_port(struct ls_pcie *pcie) +{ + struct pcie_port *pp; + int ret; + + pp = &pcie->pp; + pp->dev = pcie->dev; + pp->dbi_base = pcie->dbi; + pp->root_bus_nr = -1; + pp->ops = &ls_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(pp->dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int __init ls_pcie_probe(struct platform_device *pdev) +{ + struct ls_pcie *pcie; + struct resource *dbi_base; + u32 index[2]; + int ret; + + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = &pdev->dev; + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + if (!dbi_base) { + dev_err(&pdev->dev, "missing *regs* space\n"); + return -ENODEV; + } + + pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base); + if (IS_ERR(pcie->dbi)) + return PTR_ERR(pcie->dbi); + + pcie->scfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "fsl,pcie-scfg"); + if (IS_ERR(pcie->scfg)) { + dev_err(&pdev->dev, "No syscfg phandle specified\n"); + return PTR_ERR(pcie->scfg); + } + + ret = of_property_read_u32_array(pdev->dev.of_node, + "fsl,pcie-scfg", index, 2); + if (ret) + return ret; + pcie->index = index[1]; + + ret = ls_add_pcie_port(pcie); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, pcie); + + return 0; +} + +static const struct of_device_id ls_pcie_of_match[] = { + { .compatible = "fsl,ls1021a-pcie" }, + { }, +}; +MODULE_DEVICE_TABLE(of, ls_pcie_of_match); + +static struct platform_driver ls_pcie_driver = { + .driver = { + .name = "layerscape-pcie", + .owner = THIS_MODULE, + .of_match_table = ls_pcie_of_match, + }, +}; + +module_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); + +MODULE_AUTHOR("Minghuan Lian "); +MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-59-g8ed1b From 
8fe671fc0b9f5a0d013153a9ddbd979d6eff0547 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Thu, 13 Nov 2014 15:19:47 -0800 Subject: MAINTAINERS: add IIO include files Files under include/linux/iio were not reported as part of the IIO subsystem. Signed-off-by: Daniel Baluta Reported-by: Cristina Ciocan Reviewed-by: Jingoo Han Cc: Hartmut Knaack Cc: Lars-Peter Clausen Cc: Peter Meerwald Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ea4d0058fd1b..60b1163dba28 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4716,6 +4716,7 @@ L: linux-iio@vger.kernel.org S: Maintained F: drivers/iio/ F: drivers/staging/iio/ +F: include/linux/iio/ IKANOS/ADI EAGLE ADSL USB DRIVER M: Matthieu Castet -- cgit v1.2.3-59-g8ed1b From baeb0d9b98c3450e22f8f8e4a6619b8b1d5106ff Mon Sep 17 00:00:00 2001 From: Michael Turquette Date: Thu, 13 Nov 2014 17:18:20 -0800 Subject: MAINTAINERS: clk framework git tree moved to kernel.org Signed-off-by: Michael Turquette --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 46d652f166ab..924499202dac 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2493,7 +2493,7 @@ COMMON CLK FRAMEWORK M: Mike Turquette M: Stephen Boyd L: linux-kernel@vger.kernel.org -T: git git://git.linaro.org/people/mturquette/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git S: Maintained F: drivers/clk/ X: drivers/clk/clkdev.c -- cgit v1.2.3-59-g8ed1b From 6f15b602b8f93e1d199a85b95183d7a5836b3152 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 14 Nov 2014 13:26:46 +0100 Subject: simplefb: Add simplefb MAINTAINERS entry During the discussion about adding clock handling code to simplefb, it became clear that simplefb currently does not have an active maintainer. I've discussed this with Stephen Warren , the original author of simplefb, and with his permission I'm picking up maintainership of simplefb. Cc: Stephen Warren Signed-off-by: Hans de Goede Acked-by: Geert Uytterhoeven Acked-by: Stephen Warren Reviewed-by: Maxime Ripard Reviewed-by: David Herrmann Acked-by: Grant Likely Signed-off-by: Tomi Valkeinen --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3c6427190be2..d348ccc162fb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8442,6 +8442,14 @@ F: drivers/media/usb/siano/ F: drivers/media/usb/siano/ F: drivers/media/mmc/siano/ +SIMPLEFB FB DRIVER +M: Hans de Goede +L: linux-fbdev@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/video/simple-framebuffer.txt +F: drivers/video/fbdev/simplefb.c +F: include/linux/platform_data/simplefb.h + SH_VEU V4L2 MEM2MEM DRIVER L: linux-media@vger.kernel.org S: Orphan -- cgit v1.2.3-59-g8ed1b From 0ce277e45f85433dde044561ad3b3e876a9ee906 Mon Sep 17 00:00:00 2001 From: Antti Palosaari Date: Wed, 12 Nov 2014 00:35:42 -0300 Subject: [media] MAINTAINERS: add mn88472 (Panasonic MN88472) Add mn88472 driver from staging. DVB-T/T2/C demodulator driver.
Signed-off-by: Antti Palosaari Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 2a9cff1da54b..644a1aecf44a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6115,6 +6115,17 @@ S: Supported F: include/linux/mlx5/ F: drivers/infiniband/hw/mlx5/ +MN88472 MEDIA DRIVER +M: Antti Palosaari +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/staging/media/mn88472/ +F: drivers/media/dvb-frontends/mn88472.h + MODULE SUPPORT M: Rusty Russell S: Maintained -- cgit v1.2.3-59-g8ed1b From 4f4d238f81d3edf4ed019f3873cd49733471410f Mon Sep 17 00:00:00 2001 From: Antti Palosaari Date: Wed, 12 Nov 2014 00:53:57 -0300 Subject: [media] MAINTAINERS: add mn88473 (Panasonic MN88473) Add mn88473 driver from staging. DVB-T/T2/C demodulator driver. Signed-off-by: Antti Palosaari Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 644a1aecf44a..a6288cacde10 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6126,6 +6126,17 @@ S: Maintained F: drivers/staging/media/mn88472/ F: drivers/media/dvb-frontends/mn88472.h +MN88473 MEDIA DRIVER +M: Antti Palosaari +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/staging/media/mn88473/ +F: drivers/media/dvb-frontends/mn88473.h + MODULE SUPPORT M: Rusty Russell S: Maintained -- cgit v1.2.3-59-g8ed1b From 7a2071c58f36450fbf44a27d2e5d371c18534a25 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 14 Nov 2014 16:49:47 +0100 Subject: ARM: shmobile: Add early debugging support using SCIF(A) Add serial port debug macros for the SCIF(A) serial ports. This includes all supported shmobile SoCs, except for EMEV2. The configuration logic (both Kconfig and #ifdef) is more complicated than one would expect, for several reasons: 1. Not all SoCs have the same serial devices, and they're not always at the same addresses. 2. There are two different types: SCIF and SCIFA. Fortunately they can easily be distinguished by physical address. 3. Not all boards use the same serial port for the console. The defaults correspond to the boards that are supported in mainline. If you want to use a different serial port, just change the value of CONFIG_DEBUG_UART_PHYS, and the rest will auto-adapt. 4. debug_ll_io_init() maps the SCIF(A) registers to a fixed virtual address. 0xfdxxxxxx was chosen, as it should lie below VMALLOC_END = 0xff000000, and must not conflict with the 2 MiB reserved region at PCI_IO_VIRT_BASE = 0xfee00000. - On SoCs not using the legacy machine_desc.map_io(), debug_ll_io_init() is called by the ARM core code. - On SoCs using the legacy machine_desc.map_io(), debug_ll_io_init() must be called explicitly. Calls are added for r8a7740, r8a7779, sh7372, and sh73a0. This was derived from the r8a7790 version by Laurent Pinchart. 
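For illustration only (not part of the patch), a minimal .config fragment for a hypothetical R-Car Gen2 board whose console sits on SCIF0 might look like:

    CONFIG_DEBUG_LL=y
    CONFIG_DEBUG_RCAR_GEN2_SCIF0=y
    CONFIG_DEBUG_UART_PHYS=0xe6e60000

The DEBUG_UART_PHYS value matches the default added for DEBUG_RCAR_GEN2_SCIF0 in the Kconfig hunk below, so it only needs to be set explicitly for a board wired to a non-default port; the virtual mapping is then derived from it by the new renesas-scif.S macros.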
Signed-off-by: Geert Uytterhoeven Acked-by: Laurent Pinchart Acked-by: Arnd Bergmann Tested-by: Simon Horman Signed-off-by: Simon Horman --- MAINTAINERS | 1 + arch/arm/Kconfig.debug | 80 +++++++++++++++++++++++++++++++++- arch/arm/include/debug/renesas-scif.S | 52 ++++++++++++++++++++++ arch/arm/mach-shmobile/setup-r8a7740.c | 1 + arch/arm/mach-shmobile/setup-r8a7779.c | 1 + arch/arm/mach-shmobile/setup-sh7372.c | 1 + arch/arm/mach-shmobile/setup-sh73a0.c | 1 + 7 files changed, 136 insertions(+), 1 deletion(-) create mode 100644 arch/arm/include/debug/renesas-scif.S (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab0..7d843099e9c6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1387,6 +1387,7 @@ F: arch/arm/configs/lager_defconfig F: arch/arm/configs/mackerel_defconfig F: arch/arm/configs/marzen_defconfig F: arch/arm/configs/shmobile_defconfig +F: arch/arm/include/debug/renesas-scif.S F: arch/arm/mach-shmobile/ F: drivers/sh/ diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 03dc4c1a8736..6ac3758f85d8 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -653,6 +653,64 @@ choice Say Y here if you want kernel low-level debugging support on Rockchip RK32xx based platforms. + config DEBUG_R7S72100_SCIF2 + bool "Kernel low-level debugging messages via SCIF2 on R7S72100" + depends on ARCH_R7S72100 + help + Say Y here if you want kernel low-level debugging support + via SCIF2 on Renesas RZ/A1H (R7S72100). + + config DEBUG_RCAR_GEN1_SCIF0 + bool "Kernel low-level debugging messages via SCIF0 on R8A7778" + depends on ARCH_R8A7778 + help + Say Y here if you want kernel low-level debugging support + via SCIF0 on Renesas R-Car M1A (R8A7778). + + config DEBUG_RCAR_GEN1_SCIF2 + bool "Kernel low-level debugging messages via SCIF2 on R8A7779" + depends on ARCH_R8A7779 + help + Say Y here if you want kernel low-level debugging support + via SCIF2 on Renesas R-Car H1 (R8A7779). + + config DEBUG_RCAR_GEN2_SCIF0 + bool "Kernel low-level debugging messages via SCIF0 on R8A7790/R8A7791/R8A7793)" + depends on ARCH_R8A7790 || ARCH_R8A7791 || ARCH_R8A7793 + help + Say Y here if you want kernel low-level debugging support + via SCIF0 on Renesas R-Car H2 (R8A7790), M2-W (R8A7791), or + M2-N (R8A7793). + + config DEBUG_RCAR_GEN2_SCIF2 + bool "Kernel low-level debugging messages via SCIF2 on R8A7794" + depends on ARCH_R8A7794 + help + Say Y here if you want kernel low-level debugging support + via SCIF2 on Renesas R-Car E2 (R8A7794). + + config DEBUG_RMOBILE_SCIFA0 + bool "Kernel low-level debugging messages via SCIFA0 on R8A73A4/SH7372" + depends on ARCH_R8A73A4 || ARCH_SH7372 + help + Say Y here if you want kernel low-level debugging support + via SCIFA0 on Renesas R-Mobile APE6 (R8A73A4) or SH-Mobile + AP4 (SH7372). + + config DEBUG_RMOBILE_SCIFA1 + bool "Kernel low-level debugging messages via SCIFA1 on R8A7740" + depends on ARCH_R8A7740 + help + Say Y here if you want kernel low-level debugging support + via SCIFA1 on Renesas R-Mobile A1 (R8A7740). + + config DEBUG_RMOBILE_SCIFA4 + bool "Kernel low-level debugging messages via SCIFA4 on SH73A0" + depends on ARCH_SH73A0 + help + Say Y here if you want kernel low-level debugging support + via SCIFA4 on Renesas SH-Mobile AG5 (SH73A0). 
+ config DEBUG_S3C_UART0 depends on PLAT_SAMSUNG select DEBUG_EXYNOS_UART if ARCH_EXYNOS @@ -1061,6 +1119,14 @@ config DEBUG_LL_INCLUDE DEBUG_IMX6SX_UART default "debug/msm.S" if DEBUG_MSM_UART || DEBUG_QCOM_UARTDM default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART + default "debug/renesas-scif.S" if DEBUG_R7S72100_SCIF2 + default "debug/renesas-scif.S" if DEBUG_RCAR_GEN1_SCIF0 + default "debug/renesas-scif.S" if DEBUG_RCAR_GEN1_SCIF2 + default "debug/renesas-scif.S" if DEBUG_RCAR_GEN2_SCIF0 + default "debug/renesas-scif.S" if DEBUG_RCAR_GEN2_SCIF2 + default "debug/renesas-scif.S" if DEBUG_RMOBILE_SCIFA0 + default "debug/renesas-scif.S" if DEBUG_RMOBILE_SCIFA1 + default "debug/renesas-scif.S" if DEBUG_RMOBILE_SCIFA4 default "debug/s3c24xx.S" if DEBUG_S3C24XX_UART default "debug/s5pv210.S" if DEBUG_S5PV210_UART default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1 @@ -1152,6 +1218,12 @@ config DEBUG_UART_PHYS default 0xd4018000 if DEBUG_MMP_UART3 default 0xe0000000 if ARCH_SPEAR13XX default 0xe4007000 if DEBUG_HIP04_UART + default 0xe6c40000 if DEBUG_RMOBILE_SCIFA0 + default 0xe6c50000 if DEBUG_RMOBILE_SCIFA1 + default 0xe6c80000 if DEBUG_RMOBILE_SCIFA4 + default 0xe6e58000 if DEBUG_RCAR_GEN2_SCIF2 + default 0xe6e60000 if DEBUG_RCAR_GEN2_SCIF0 + default 0xe8008000 if DEBUG_R7S72100_SCIF2 default 0xf0000be0 if ARCH_EBSA110 default 0xf1012000 if DEBUG_MVEBU_UART_ALTERNATE default 0xf1012000 if ARCH_DOVE || ARCH_MV78XX0 || \ @@ -1164,13 +1236,19 @@ config DEBUG_UART_PHYS default 0xff690000 if DEBUG_RK32_UART2 default 0xffc02000 if DEBUG_SOCFPGA_UART default 0xffd82340 if ARCH_IOP13XX + default 0xffe40000 if DEBUG_RCAR_GEN1_SCIF0 + default 0xffe42000 if DEBUG_RCAR_GEN1_SCIF2 default 0xfff36000 if DEBUG_HIGHBANK_UART default 0xfffe8600 if DEBUG_UART_BCM63XX default 0xfffff700 if ARCH_IOP33X depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \ DEBUG_LL_UART_EFM32 || \ DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \ - DEBUG_MSM_UART || DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \ + DEBUG_MSM_UART || DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \ + DEBUG_RCAR_GEN1_SCIF0 || DEBUG_RCAR_GEN1_SCIF2 || \ + DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \ + DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \ + DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \ DEBUG_UART_BCM63XX config DEBUG_UART_VIRT diff --git a/arch/arm/include/debug/renesas-scif.S b/arch/arm/include/debug/renesas-scif.S new file mode 100644 index 000000000000..97820a8df51a --- /dev/null +++ b/arch/arm/include/debug/renesas-scif.S @@ -0,0 +1,52 @@ +/* + * Renesas SCIF(A) debugging macro include header + * + * Based on r8a7790.S + * + * Copyright (C) 2012-2013 Renesas Electronics Corporation + * Copyright (C) 1994-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define SCIF_PHYS CONFIG_DEBUG_UART_PHYS +#define SCIF_VIRT ((SCIF_PHYS & 0x00ffffff) | 0xfd000000) + +#if CONFIG_DEBUG_UART_PHYS < 0xe6e00000 +/* SCIFA */ +#define FTDR 0x20 +#define FSR 0x14 +#else +/* SCIF */ +#define FTDR 0x0c +#define FSR 0x10 +#endif + +#define TDFE (1 << 5) +#define TEND (1 << 6) + + .macro addruart, rp, rv, tmp + ldr \rp, =SCIF_PHYS + ldr \rv, =SCIF_VIRT + .endm + + .macro waituart, rd, rx +1001: ldrh \rd, [\rx, #FSR] + tst \rd, #TDFE + beq 1001b + .endm + + .macro senduart, rd, rx + strb \rd, [\rx, #FTDR] + ldrh \rd, [\rx, #FSR] + bic \rd, \rd, #TEND + strh \rd, [\rx, #FSR] + .endm + + .macro busyuart, rd, rx +1001: ldrh \rd, [\rx, #FSR] + tst \rd, #TEND + beq 1001b + .endm diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c index 8894e1b7ab0e..0bfe2261c4e7 100644 --- a/arch/arm/mach-shmobile/setup-r8a7740.c +++ b/arch/arm/mach-shmobile/setup-r8a7740.c @@ -71,6 +71,7 @@ static struct map_desc r8a7740_io_desc[] __initdata = { void __init r8a7740_map_io(void) { + debug_ll_io_init(); iotable_init(r8a7740_io_desc, ARRAY_SIZE(r8a7740_io_desc)); } diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c index 136078ab9407..434d1504066a 100644 --- a/arch/arm/mach-shmobile/setup-r8a7779.c +++ b/arch/arm/mach-shmobile/setup-r8a7779.c @@ -70,6 +70,7 @@ static struct map_desc r8a7779_io_desc[] __initdata = { void __init r8a7779_map_io(void) { + debug_ll_io_init(); iotable_init(r8a7779_io_desc, ARRAY_SIZE(r8a7779_io_desc)); } diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 769ff008571d..322e2dc3fa36 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -60,6 +60,7 @@ static struct map_desc sh7372_io_desc[] __initdata = { void __init sh7372_map_io(void) { + debug_ll_io_init(); iotable_init(sh7372_io_desc, ARRAY_SIZE(sh7372_io_desc)); } diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index 3447ca7e90d9..fa7cab820ab9 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -58,6 +58,7 @@ static struct map_desc sh73a0_io_desc[] __initdata = { void __init sh73a0_map_io(void) { + debug_ll_io_init(); iotable_init(sh73a0_io_desc, ARRAY_SIZE(sh73a0_io_desc)); } -- cgit v1.2.3-59-g8ed1b From 8dad0c51dac3f906b213ef3486cad39a06419252 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Wed, 12 Nov 2014 16:12:17 +1100 Subject: sun3_scsi: Adopt atari_NCR5380 core driver and remove sun3_NCR5380.c Given the preceding changes to atari_NCR5380.c, this patch should not change behaviour of the sun3_scsi and sun3_scsi_vme modules. 
Signed-off-by: Finn Thain Reviewed-by: Hannes Reinecke Signed-off-by: Christoph Hellwig --- MAINTAINERS | 1 - drivers/scsi/sun3_NCR5380.c | 2848 ------------------------------------------- drivers/scsi/sun3_scsi.c | 10 +- 3 files changed, 6 insertions(+), 2853 deletions(-) delete mode 100644 drivers/scsi/sun3_NCR5380.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 84d9c5dea6bc..d206f3779306 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6304,7 +6304,6 @@ F: drivers/scsi/g_NCR5380.* F: drivers/scsi/g_NCR5380_mmio.c F: drivers/scsi/mac_scsi.* F: drivers/scsi/pas16.* -F: drivers/scsi/sun3_NCR5380.c F: drivers/scsi/sun3_scsi.* F: drivers/scsi/sun3_scsi_vme.c F: drivers/scsi/t128.* diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c deleted file mode 100644 index 0cb25f6d58b2..000000000000 --- a/drivers/scsi/sun3_NCR5380.c +++ /dev/null @@ -1,2848 +0,0 @@ -/* sun3_NCR5380.c -- adapted from atari_NCR5380.c for the sun3 by - Sam Creasey. */ -/* - * NCR 5380 generic driver routines. These should make it *trivial* - * to implement 5380 SCSI drivers under Linux with a non-trantor - * architecture. - * - * Note that these routines also work with NR53c400 family chips. - * - * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 666-5836 - * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 - */ - -/* - * ++roman: To port the 5380 driver to the Atari, I had to do some changes in - * this file, too: - * - * - Some of the debug statements were incorrect (undefined variables and the - * like). I fixed that. - * - * - In information_transfer(), I think a #ifdef was wrong. Looking at the - * possible DMA transfer size should also happen for REAL_DMA. I added this - * in the #if statement. - * - * - When using real DMA, information_transfer() should return in a DATAOUT - * phase after starting the DMA. It has nothing more to do. - * - * - The interrupt service routine should run main after end of DMA, too (not - * only after RESELECTION interrupts). Additionally, it should _not_ test - * for more interrupts after running main, since a DMA process may have - * been started and interrupts are turned on now. The new int could happen - * inside the execution of NCR5380_intr(), leading to recursive - * calls. - * - * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA - * and USLEEP, because these were messing up readability and will never be - * needed for Atari SCSI. - * - * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' - * stuff), and 'main' is executed in a bottom half if awoken by an - * interrupt. - * - * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." - * constructs. In my eyes, this made the source rather unreadable, so I - * finally replaced that by the *_PRINTK() macros. - * - */ -#include -#include - -/* - * Further development / testing that should be done : - * 1. Test linked command handling code after Eric is ready with - * the high level code. 
- */ - -#if (NDEBUG & NDEBUG_LISTS) -#define LIST(x,y) \ - { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \ - if ((x)==(y)) udelay(5); } -#define REMOVE(w,x,y,z) \ - { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \ - (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \ - if ((x)==(y)) udelay(5); } -#else -#define LIST(x,y) -#define REMOVE(w,x,y,z) -#endif - -#ifndef notyet -#undef LINKED -#endif - -/* - * Design - * Issues : - * - * The other Linux SCSI drivers were written when Linux was Intel PC-only, - * and specifically for each board rather than each chip. This makes their - * adaptation to platforms like the Mac (Some of which use NCR5380's) - * more difficult than it has to be. - * - * Also, many of the SCSI drivers were written before the command queuing - * routines were implemented, meaning their implementations of queued - * commands were hacked on rather than designed in from the start. - * - * When I designed the Linux SCSI drivers I figured that - * while having two different SCSI boards in a system might be useful - * for debugging things, two of the same type wouldn't be used. - * Well, I was wrong and a number of users have mailed me about running - * multiple high-performance SCSI boards in a server. - * - * Finally, when I get questions from users, I have no idea what - * revision of my driver they are running. - * - * This driver attempts to address these problems : - * This is a generic 5380 driver. To use it on a different platform, - * one simply writes appropriate system specific macros (ie, data - * transfer - some PC's will use the I/O bus, 68K's must use - * memory mapped) and drops this file in their 'C' wrapper. - * - * As far as command queueing, two queues are maintained for - * each 5380 in the system - commands that haven't been issued yet, - * and commands that are currently executing. This means that an - * unlimited number of commands may be queued, letting - * more commands propagate from the higher driver levels giving higher - * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, - * allowing multiple commands to propagate all the way to a SCSI-II device - * while a command is already executing. - * - * To solve the multiple-boards-in-the-same-system problem, - * there is a separate instance structure for each instance - * of a 5380 in the system. So, multiple NCR5380 drivers will - * be able to coexist with appropriate changes to the high level - * SCSI code. - * - * Issues specific to the NCR5380 : - * - * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead - * piece of hardware that requires you to sit in a loop polling for - * the REQ signal as long as you are connected. Some devices are - * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect - * while doing long seek operations. - * - * The workaround for this is to keep track of devices that have - * disconnected. If the device hasn't disconnected, for commands that - * should disconnect, we do something like - * - * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } - * - * Some tweaking of N and M needs to be done. An algorithm based - * on "time to data" would give the best results as long as short time - * to datas (ie, on the same track) were considered, however these - * broken devices are the exception rather than the rule and I'd rather - * spend my time optimizing for the normal case. 
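Aside: the design notes above give the REQ-polling workaround only as pseudocode ("while (!REQ is asserted) { sleep for N usecs; poll for M usecs }"). Purely as an illustration, and not part of this patch, a two-phase poll along those lines could look like the sketch below; reg_read(), the STATUS_REG/SR_REQ placeholders, both delay constants and the jiffies-based timeout policy are invented stand-ins, not the driver's real accessors or tunables.

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

/* Stand-ins for the driver's register accessor and status bits (hypothetical). */
#define STATUS_REG		4
#define SR_REQ			(1 << 5)
extern u8 reg_read(unsigned int reg);	/* hypothetical MMIO/PIO read */

#define POLL_BUSY_USECS		20	/* "M": length of one busy-poll window */
#define POLL_SLEEP_USECS	200	/* "N": back-off between windows */

static bool wait_for_req(unsigned long timeout_us)
{
	unsigned long deadline = jiffies + usecs_to_jiffies(timeout_us);

	while (time_before(jiffies, deadline)) {
		unsigned int spins = POLL_BUSY_USECS;

		/* Short busy-poll first: REQ normally reappears quickly. */
		while (spins--) {
			if (reg_read(STATUS_REG) & SR_REQ)
				return true;
			udelay(1);
		}
		/* Target still busy (e.g. a long seek): back off and retry. */
		usleep_range(POLL_SLEEP_USECS, 2 * POLL_SLEEP_USECS);
	}
	return false;	/* caller decides whether to abort the nexus */
}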
- * - * Architecture : - * - * At the heart of the design is a coroutine, NCR5380_main, - * which is started when not running by the interrupt handler, - * timer, and queue command function. It attempts to establish - * I_T_L or I_T_L_Q nexuses by removing the commands from the - * issue queue and calling NCR5380_select() if a nexus - * is not established. - * - * Once a nexus is established, the NCR5380_information_transfer() - * phase goes through the various phases as instructed by the target. - * if the target goes into MSG IN and sends a DISCONNECT message, - * the command structure is placed into the per instance disconnected - * queue, and NCR5380_main tries to find more work. If USLEEP - * was defined, and the target is idle for too long, the system - * will try to sleep. - * - * If a command has disconnected, eventually an interrupt will trigger, - * calling NCR5380_intr() which will in turn call NCR5380_reselect - * to reestablish a nexus. This will run main if necessary. - * - * On command termination, the done function will be called as - * appropriate. - * - * SCSI pointers are maintained in the SCp field of SCSI command - * structures, being initialized after the command is connected - * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. - * Note that in violation of the standard, an implicit SAVE POINTERS operation - * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. - */ - -/* - * Using this file : - * This file a skeleton Linux SCSI driver for the NCR 5380 series - * of chips. To use it, you write an architecture specific functions - * and macros and include this file in your driver. - * - * These macros control options : - * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. - * - * LINKED - if defined, linked commands are supported. - * - * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. - * - * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible - * - * These macros MUST be defined : - * - * NCR5380_read(register) - read from the specified register - * - * NCR5380_write(register, value) - write to the specific register - * - * Either real DMA *or* pseudo DMA may be implemented - * REAL functions : - * NCR5380_REAL_DMA should be defined if real DMA is to be used. - * Note that the DMA setup functions should return the number of bytes - * that they were able to program the controller for. - * - * Also note that generic i386/PC versions of these macros are - * available as NCR5380_i386_dma_write_setup, - * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. - * - * NCR5380_dma_write_setup(instance, src, count) - initialize - * NCR5380_dma_read_setup(instance, dst, count) - initialize - * NCR5380_dma_residual(instance); - residual count - * - * PSEUDO functions : - * NCR5380_pwrite(instance, src, count) - * NCR5380_pread(instance, dst, count); - * - * If nothing specific to this implementation needs doing (ie, with external - * hardware), you must also define - * - * NCR5380_queue_command - * NCR5380_reset - * NCR5380_abort - * - * to be the global entry points into the specific driver, ie - * #define NCR5380_queue_command t128_queue_command. - * - * If this is not done, the routines will be defined as static functions - * with the NCR5380* names and the user must provide a globally - * accessible wrapper function. 
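The wrapper contract spelled out above is easier to see as glue code. The fragment below is only a sketch of what a board driver supplies before pulling in the generic core (sun3_scsi.c does the equivalent with atari_NCR5380.c once this patch is applied); the myboard_* names and the MMIO base are invented for the example and do not exist in the tree.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/io.h>

#define MYBOARD_5380_BASE	0x58000000UL	/* hypothetical MMIO base */

static void __iomem *myboard_regs;		/* mapped at probe time */

static int __init myboard_ioremap(void)
{
	myboard_regs = ioremap(MYBOARD_5380_BASE, 0x20);
	return myboard_regs ? 0 : -ENOMEM;
}

/* Register accessors the core file requires. */
#define NCR5380_read(reg)		readb(myboard_regs + (reg))
#define NCR5380_write(reg, value)	writeb((value), myboard_regs + (reg))

/* Map the generic entry points to globally visible, board-specific names. */
#define NCR5380_queue_command		myboard_queue_command
#define NCR5380_reset			myboard_reset
#define NCR5380_abort			myboard_abort

#include "NCR5380.c"	/* drop the generic core into this 'C' wrapper,
			 * as the contract above describes */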
- * - * The generic driver is initialized by calling NCR5380_init(instance), - * after setting the appropriate host specific fields and ID. If the - * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, - * possible) function may be used. - */ - -static struct Scsi_Host *first_instance = NULL; -static struct scsi_host_template *the_template = NULL; - -/* Macros ease life... :-) */ -#define SETUP_HOSTDATA(in) \ - struct NCR5380_hostdata *hostdata = \ - (struct NCR5380_hostdata *)(in)->hostdata -#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) - -#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble) -#define SET_NEXT(cmd, next) ((cmd)->host_scribble = (void *)(next)) -#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble)) - -#define HOSTNO instance->host_no -#define H_NO(cmd) (cmd)->device->host->host_no - -#define SGADDR(buffer) (void *)(((unsigned long)sg_virt(((buffer))))) - -#ifdef SUPPORT_TAGS - -/* - * Functions for handling tagged queuing - * ===================================== - * - * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes: - * - * Using consecutive numbers for the tags is no good idea in my eyes. There - * could be wrong re-usings if the counter (8 bit!) wraps and some early - * command has been preempted for a long time. My solution: a bitfield for - * remembering used tags. - * - * There's also the problem that each target has a certain queue size, but we - * cannot know it in advance :-( We just see a QUEUE_FULL status being - * returned. So, in this case, the driver internal queue size assumption is - * reduced to the number of active tags if QUEUE_FULL is returned by the - * target. The command is returned to the mid-level, but with status changed - * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL - * correctly. - * - * We're also not allowed running tagged commands as long as an untagged - * command is active. And REQUEST SENSE commands after a contingent allegiance - * condition _must_ be untagged. To keep track whether an untagged command has - * been issued, the host->busy array is still employed, as it is without - * support for tagged queuing. - * - * One could suspect that there are possible race conditions between - * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the - * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(), - * which already guaranteed to be running at most once. It is also the only - * place where tags/LUNs are allocated. So no other allocation can slip - * between that pair, there could only happen a reselection, which can free a - * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes - * important: the tag bit must be cleared before 'nr_allocated' is decreased. - */ - -/* For the m68k, the number of bits in 'allocated' must be a multiple of 32! */ -#if (MAX_TAGS % 32) != 0 -#error "MAX_TAGS must be a multiple of 32!" 
-#endif - -typedef struct { - char allocated[MAX_TAGS/8]; - int nr_allocated; - int queue_size; -} TAG_ALLOC; - -static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */ - - -static void __init init_tags( void ) -{ - int target, lun; - TAG_ALLOC *ta; - - if (!setup_use_tagged_queuing) - return; - - for( target = 0; target < 8; ++target ) { - for( lun = 0; lun < 8; ++lun ) { - ta = &TagAlloc[target][lun]; - memset( &ta->allocated, 0, MAX_TAGS/8 ); - ta->nr_allocated = 0; - /* At the beginning, assume the maximum queue size we could - * support (MAX_TAGS). This value will be decreased if the target - * returns QUEUE_FULL status. - */ - ta->queue_size = MAX_TAGS; - } - } -} - - -/* Check if we can issue a command to this LUN: First see if the LUN is marked - * busy by an untagged command. If the command should use tagged queuing, also - * check that there is a free tag and the target's queue won't overflow. This - * function should be called with interrupts disabled to avoid race - * conditions. - */ - -static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) -{ - u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); - - if (hostdata->busy[cmd->device->id] & (1 << lun)) - return( 1 ); - if (!should_be_tagged || - !setup_use_tagged_queuing || !cmd->device->tagged_supported) - return( 0 ); - if (TagAlloc[cmd->device->id][lun].nr_allocated >= - TagAlloc[cmd->device->id][lun].queue_size ) { - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", - H_NO(cmd), cmd->device->id, lun ); - return( 1 ); - } - return( 0 ); -} - - -/* Allocate a tag for a command (there are no checks anymore, check_lun_busy() - * must be called before!), or reserve the LUN in 'busy' if the command is - * untagged. - */ - -static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) -{ - u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); - - /* If we or the target don't support tagged queuing, allocate the LUN for - * an untagged command. - */ - if (!should_be_tagged || - !setup_use_tagged_queuing || !cmd->device->tagged_supported) { - cmd->tag = TAG_NONE; - hostdata->busy[cmd->device->id] |= (1 << lun); - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " - "command\n", H_NO(cmd), cmd->device->id, lun ); - } - else { - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; - - cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); - set_bit( cmd->tag, &ta->allocated ); - ta->nr_allocated++; - dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " - "(now %d tags in use)\n", - H_NO(cmd), cmd->tag, cmd->device->id, lun, - ta->nr_allocated ); - } -} - - -/* Mark the tag of command 'cmd' as free, or in case of an untagged command, - * unlock the LUN. 
- */ - -static void cmd_free_tag(struct scsi_cmnd *cmd) -{ - u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); - - if (cmd->tag == TAG_NONE) { - hostdata->busy[cmd->device->id] &= ~(1 << lun); - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", - H_NO(cmd), cmd->device->id, lun ); - } - else if (cmd->tag >= MAX_TAGS) { - printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", - H_NO(cmd), cmd->tag ); - } - else { - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; - clear_bit( cmd->tag, &ta->allocated ); - ta->nr_allocated--; - dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", - H_NO(cmd), cmd->tag, cmd->device->id, lun ); - } -} - - -static void free_all_tags( void ) -{ - int target, lun; - TAG_ALLOC *ta; - - if (!setup_use_tagged_queuing) - return; - - for( target = 0; target < 8; ++target ) { - for( lun = 0; lun < 8; ++lun ) { - ta = &TagAlloc[target][lun]; - memset( &ta->allocated, 0, MAX_TAGS/8 ); - ta->nr_allocated = 0; - } - } -} - -#endif /* SUPPORT_TAGS */ - - -/* - * Function : void initialize_SCp(struct scsi_cmnd *cmd) - * - * Purpose : initialize the saved data pointers for cmd to point to the - * start of the buffer. - * - * Inputs : cmd - struct scsi_cmnd structure to have pointers reset. - */ - -static __inline__ void initialize_SCp(struct scsi_cmnd *cmd) -{ - /* - * Initialize the Scsi Pointer field so that all of the commands in the - * various queues are valid. - */ - - if (scsi_bufflen(cmd)) { - cmd->SCp.buffer = scsi_sglist(cmd); - cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; - cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); - cmd->SCp.this_residual = cmd->SCp.buffer->length; - } else { - cmd->SCp.buffer = NULL; - cmd->SCp.buffers_residual = 0; - cmd->SCp.ptr = NULL; - cmd->SCp.this_residual = 0; - } - -} - -#include - -#if NDEBUG -static struct { - unsigned char mask; - const char * name;} -signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" }, - { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" }, - { SR_SEL, "SEL" }, {0, NULL}}, -basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}}, -icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"}, - {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"}, - {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"}, - {0, NULL}}, -mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"}, - {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR, - "MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"}, - {MR_MONITOR_BSY, "MODE MONITOR BSY"}, - {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"}, - {0, NULL}}; - -/* - * Function : void NCR5380_print(struct Scsi_Host *instance) - * - * Purpose : print the SCSI bus signals for debugging purposes - * - * Input : instance - which NCR5380 - */ - -static void NCR5380_print(struct Scsi_Host *instance) { - unsigned char status, data, basr, mr, icr, i; - unsigned long flags; - - local_irq_save(flags); - data = NCR5380_read(CURRENT_SCSI_DATA_REG); - status = NCR5380_read(STATUS_REG); - mr = NCR5380_read(MODE_REG); - icr = NCR5380_read(INITIATOR_COMMAND_REG); - basr = NCR5380_read(BUS_AND_STATUS_REG); - local_irq_restore(flags); - printk("STATUS_REG: %02x ", status); - for (i = 0; signals[i].mask ; ++i) - if (status & signals[i].mask) - printk(",%s", signals[i].name); - printk("\nBASR: %02x ", basr); - for (i = 0; basrs[i].mask ; ++i) - if (basr & basrs[i].mask) - printk(",%s", basrs[i].name); - printk("\nICR: 
%02x ", icr); - for (i = 0; icrs[i].mask; ++i) - if (icr & icrs[i].mask) - printk(",%s", icrs[i].name); - printk("\nMODE: %02x ", mr); - for (i = 0; mrs[i].mask; ++i) - if (mr & mrs[i].mask) - printk(",%s", mrs[i].name); - printk("\n"); -} - -static struct { - unsigned char value; - const char *name; -} phases[] = { - {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"}, - {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"}, - {PHASE_UNKNOWN, "UNKNOWN"}}; - -/* - * Function : void NCR5380_print_phase(struct Scsi_Host *instance) - * - * Purpose : print the current SCSI phase for debugging purposes - * - * Input : instance - which NCR5380 - */ - -static void NCR5380_print_phase(struct Scsi_Host *instance) -{ - unsigned char status; - int i; - - status = NCR5380_read(STATUS_REG); - if (!(status & SR_REQ)) - printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO); - else { - for (i = 0; (phases[i].value != PHASE_UNKNOWN) && - (phases[i].value != (status & PHASE_MASK)); ++i); - printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name); - } -} - -#endif - -/* - * ++roman: New scheme of calling NCR5380_main() - * - * If we're not in an interrupt, we can call our main directly, it cannot be - * already running. Else, we queue it on a task queue, if not 'main_running' - * tells us that a lower level is already executing it. This way, - * 'main_running' needs not be protected in a special way. - * - * queue_main() is a utility function for putting our main onto the task - * queue, if main_running is false. It should be called only from a - * interrupt or bottom half. - */ - -#include -#include -#include - -static volatile int main_running = 0; -static DECLARE_WORK(NCR5380_tqueue, NCR5380_main); - -static __inline__ void queue_main(void) -{ - if (!main_running) { - /* If in interrupt and NCR5380_main() not already running, - queue it on the 'immediate' task queue, to be processed - immediately after the current interrupt processing has - finished. */ - schedule_work(&NCR5380_tqueue); - } - /* else: nothing to do: the running NCR5380_main() will pick up - any newly queued command. */ -} - - -static inline void NCR5380_all_init (void) -{ - static int done = 0; - if (!done) { - dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); - done = 1; - } -} - -/** - * NCR58380_info - report driver and host information - * @instance: relevant scsi host instance - * - * For use as the host template info() handler. 
- * - * Locks: none - */ - -static const char *NCR5380_info(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - return hostdata->info; -} - -static void prepare_info(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - snprintf(hostdata->info, sizeof(hostdata->info), - "%s, io_port 0x%lx, n_io_port %d, " - "base 0x%lx, irq %d, " - "can_queue %d, cmd_per_lun %d, " - "sg_tablesize %d, this_id %d, " - "options { %s} ", - instance->hostt->name, instance->io_port, instance->n_io_port, - instance->base, instance->irq, - instance->can_queue, instance->cmd_per_lun, - instance->sg_tablesize, instance->this_id, -#ifdef DIFFERENTIAL - "DIFFERENTIAL " -#endif -#ifdef REAL_DMA - "REAL_DMA " -#endif -#ifdef PARITY - "PARITY " -#endif -#ifdef SUPPORT_TAGS - "SUPPORT_TAGS " -#endif - ""); -} - -/* - * Function : void NCR5380_print_status (struct Scsi_Host *instance) - * - * Purpose : print commands in the various queues, called from - * NCR5380_abort and NCR5380_debug to aid debugging. - * - * Inputs : instance, pointer to this instance. - */ - -static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd) -{ - int i, s; - unsigned char *command; - printk("scsi%d: destination target %d, lun %llu\n", - H_NO(cmd), cmd->device->id, cmd->device->lun); - printk(KERN_CONT " command = "); - command = cmd->cmnd; - printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - printk(KERN_CONT " %02x", command[i]); - printk("\n"); -} - -static void NCR5380_print_status(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata; - struct scsi_cmnd *ptr; - unsigned long flags; - - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_dprint_phase(NDEBUG_ANY, instance); - - hostdata = (struct NCR5380_hostdata *)instance->hostdata; - - local_irq_save(flags); - printk("NCR5380: coroutine is%s running.\n", - main_running ? "" : "n't"); - if (!hostdata->connected) - printk("scsi%d: no currently connected command\n", HOSTNO); - else - lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected); - printk("scsi%d: issue_queue\n", HOSTNO); - for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) - lprint_Scsi_Cmnd(ptr); - - printk("scsi%d: disconnected_queue\n", HOSTNO); - for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; - ptr = NEXT(ptr)) - lprint_Scsi_Cmnd(ptr); - - local_irq_restore(flags); - printk("\n"); -} - -static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) -{ - int i, s; - unsigned char *command; - seq_printf(m, "scsi%d: destination target %d, lun %llu\n", - H_NO(cmd), cmd->device->id, cmd->device->lun); - seq_printf(m, " command = "); - command = cmd->cmnd; - seq_printf(m, "%2d (0x%02x)", command[0], command[0]); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - seq_printf(m, " %02x", command[i]); - seq_printf(m, "\n"); -} - -static int __maybe_unused NCR5380_show_info(struct seq_file *m, - struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata; - struct scsi_cmnd *ptr; - unsigned long flags; - - hostdata = (struct NCR5380_hostdata *)instance->hostdata; - - local_irq_save(flags); - seq_printf(m, "NCR5380: coroutine is%s running.\n", - main_running ? 
"" : "n't"); - if (!hostdata->connected) - seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO); - else - show_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m); - seq_printf(m, "scsi%d: issue_queue\n", HOSTNO); - for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) - show_Scsi_Cmnd(ptr, m); - - seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO); - for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; - ptr = NEXT(ptr)) - show_Scsi_Cmnd(ptr, m); - - local_irq_restore(flags); - return 0; -} - -/* - * Function : void NCR5380_init (struct Scsi_Host *instance) - * - * Purpose : initializes *instance and corresponding 5380 chip. - * - * Inputs : instance - instantiation of the 5380 driver. - * - * Notes : I assume that the host, hostno, and id bits have been - * set correctly. I don't care about the irq and other fields. - * - */ - -static int __init NCR5380_init(struct Scsi_Host *instance, int flags) -{ - int i; - SETUP_HOSTDATA(instance); - - NCR5380_all_init(); - - hostdata->aborted = 0; - hostdata->id_mask = 1 << instance->this_id; - hostdata->id_higher_mask = 0; - for (i = hostdata->id_mask; i <= 0x80; i <<= 1) - if (i > hostdata->id_mask) - hostdata->id_higher_mask |= i; - for (i = 0; i < 8; ++i) - hostdata->busy[i] = 0; -#ifdef SUPPORT_TAGS - init_tags(); -#endif -#if defined (REAL_DMA) - hostdata->dma_len = 0; -#endif - hostdata->targets_present = 0; - hostdata->connected = NULL; - hostdata->issue_queue = NULL; - hostdata->disconnected_queue = NULL; - hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT; - - if (!the_template) { - the_template = instance->hostt; - first_instance = instance; - } - - prepare_info(instance); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(TARGET_COMMAND_REG, 0); - NCR5380_write(SELECT_ENABLE_REG, 0); - - return 0; -} - -static void NCR5380_exit(struct Scsi_Host *instance) -{ - /* Empty, as we didn't schedule any delayed work */ -} - -/* - * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd, - * void (*done)(struct scsi_cmnd *)) - * - * Purpose : enqueues a SCSI command - * - * Inputs : cmd - SCSI command, done - function called on completion, with - * a pointer to the command descriptor. - * - * Returns : 0 - * - * Side effects : - * cmd is added to the per instance issue_queue, with minor - * twiddling done to the host specific fields of cmd. If the - * main coroutine is not running, it is restarted. - * - */ - -/* Only make static if a wrapper function is used */ -static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) -{ - SETUP_HOSTDATA(cmd->device->host); - struct scsi_cmnd *tmp; - unsigned long flags; - -#if (NDEBUG & NDEBUG_NO_WRITE) - switch (cmd->cmnd[0]) { - case WRITE_6: - case WRITE_10: - printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", - H_NO(cmd)); - cmd->result = (DID_ERROR << 16); - done(cmd); - return 0; - } -#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ - - /* - * We use the host_scribble field as a pointer to the next command - * in a queue - */ - - SET_NEXT(cmd, NULL); - cmd->scsi_done = done; - - cmd->result = 0; - - - /* - * Insert the cmd into the issue queue. Note that REQUEST SENSE - * commands are added to the head of the queue since any command will - * clear the contingent allegiance condition that exists and the - * sense data is only guaranteed to be valid while the condition exists. 
- */ - - local_irq_save(flags); - /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. - * Otherwise a running NCR5380_main may steal the lock. - * Lock before actually inserting due to fairness reasons explained in - * atari_scsi.c. If we insert first, then it's impossible for this driver - * to release the lock. - * Stop timer for this command while waiting for the lock, or timeouts - * may happen (and they really do), and it's no good if the command doesn't - * appear in any of the queues. - * ++roman: Just disabling the NCR interrupt isn't sufficient here, - * because also a timer int can trigger an abort or reset, which would - * alter queues and touch the lock. - */ - if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { - LIST(cmd, hostdata->issue_queue); - SET_NEXT(cmd, hostdata->issue_queue); - hostdata->issue_queue = cmd; - } else { - for (tmp = (struct scsi_cmnd *)hostdata->issue_queue; - NEXT(tmp); tmp = NEXT(tmp)) - ; - LIST(cmd, tmp); - SET_NEXT(tmp, cmd); - } - - local_irq_restore(flags); - - dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), - (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); - - /* If queue_command() is called from an interrupt (real one or bottom - * half), we let queue_main() do the job of taking care about main. If it - * is already running, this is a no-op, else main will be queued. - * - * If we're not in an interrupt, we can call NCR5380_main() - * unconditionally, because it cannot be already running. - */ - if (in_interrupt() || ((flags >> 8) & 7) >= 6) - queue_main(); - else - NCR5380_main(NULL); - return 0; -} - -static DEF_SCSI_QCMD(NCR5380_queue_command) - -/* - * Function : NCR5380_main (void) - * - * Purpose : NCR5380_main is a coroutine that runs as long as more work can - * be done on the NCR5380 host adapters in a system. Both - * NCR5380_queue_command() and NCR5380_intr() will try to start it - * in case it is not running. - * - * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should - * reenable them. This prevents reentrancy and kernel stack overflow. - */ - -static void NCR5380_main (struct work_struct *bl) -{ - struct scsi_cmnd *tmp, *prev; - struct Scsi_Host *instance = first_instance; - struct NCR5380_hostdata *hostdata = HOSTDATA(instance); - int done; - unsigned long flags; - - /* - * We run (with interrupts disabled) until we're sure that none of - * the host adapters have anything that can be done, at which point - * we set main_running to 0 and exit. - * - * Interrupts are enabled before doing various other internal - * instructions, after we've decided that we need to run through - * the loop again. - * - * this should prevent any race conditions. - * - * ++roman: Just disabling the NCR interrupt isn't sufficient here, - * because also a timer int can trigger an abort or reset, which can - * alter queues and touch the Falcon lock. - */ - - /* Tell int handlers main() is now already executing. Note that - no races are possible here. If an int comes in before - 'main_running' is set here, and queues/executes main via the - task queue, it doesn't do any harm, just this instance of main - won't find any work left to do. 
*/ - if (main_running) - return; - main_running = 1; - - local_save_flags(flags); - do { - local_irq_disable(); /* Freeze request queues */ - done = 1; - - if (!hostdata->connected) { - dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO ); - /* - * Search through the issue_queue for a command destined - * for a target that's not busy. - */ -#if (NDEBUG & NDEBUG_LISTS) - for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; - tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) - ; - if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/ -#endif - for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, - prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { - - if (prev != tmp) - dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); - /* When we find one, remove it from the issue queue. */ - /* ++guenther: possible race with Falcon locking */ - if ( -#ifdef SUPPORT_TAGS - !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE) -#else - !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun)) -#endif - ) { - /* ++guenther: just to be sure, this must be atomic */ - local_irq_disable(); - if (prev) { - REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); - SET_NEXT(prev, NEXT(tmp)); - } else { - REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp)); - hostdata->issue_queue = NEXT(tmp); - } - SET_NEXT(tmp, NULL); - - /* reenable interrupts after finding one */ - local_irq_restore(flags); - - /* - * Attempt to establish an I_T_L nexus here. - * On success, instance->hostdata->connected is set. - * On failure, we must add the command back to the - * issue queue so we can keep trying. - */ - dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " - "lun %llu removed from issue_queue\n", - HOSTNO, tmp->device->id, tmp->device->lun); - /* - * REQUEST SENSE commands are issued without tagged - * queueing, even on SCSI-II devices because the - * contingent allegiance condition exists for the - * entire unit. - */ - /* ++roman: ...and the standard also requires that - * REQUEST SENSE command are untagged. - */ - -#ifdef SUPPORT_TAGS - cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE ); -#endif - if (!NCR5380_select(instance, tmp)) { - break; - } else { - local_irq_disable(); - LIST(tmp, hostdata->issue_queue); - SET_NEXT(tmp, hostdata->issue_queue); - hostdata->issue_queue = tmp; -#ifdef SUPPORT_TAGS - cmd_free_tag( tmp ); -#endif - local_irq_restore(flags); - dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " - "returned to issue_queue\n", HOSTNO); - if (hostdata->connected) - break; - } - } /* if target/lun/target queue is not busy */ - } /* for issue_queue */ - } /* if (!hostdata->connected) */ - if (hostdata->connected -#ifdef REAL_DMA - && !hostdata->dma_len -#endif - ) { - local_irq_restore(flags); - dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", - HOSTNO); - NCR5380_information_transfer(instance); - dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); - done = 0; - } - } while (!done); - - /* Better allow ints _after_ 'main_running' has been cleared, else - an interrupt could believe we'll pick up the work it left for - us, but we won't see it anymore here... */ - main_running = 0; - local_irq_restore(flags); -} - - -#ifdef REAL_DMA -/* - * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) - * - * Purpose : Called by interrupt handler when DMA finishes or a phase - * mismatch occurs (which would finish the DMA transfer). 
- * - * Inputs : instance - this instance of the NCR5380. - * - */ - -static void NCR5380_dma_complete( struct Scsi_Host *instance ) -{ - SETUP_HOSTDATA(instance); - int transfered; - unsigned char **data; - volatile int *count; - - if (!hostdata->connected) { - printk(KERN_WARNING "scsi%d: received end of DMA interrupt with " - "no connected cmd\n", HOSTNO); - return; - } - - dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", - HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), - NCR5380_read(STATUS_REG)); - - if((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { - printk("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", HOSTNO); - printk("please e-mail sammy@sammy.net with a description of how this\n"); - printk("error was produced.\n"); - BUG(); - } - - /* make sure we're not stuck in a data phase */ - if((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | - BASR_ACK)) == - (BASR_PHASE_MATCH | BASR_ACK)) { - printk("scsi%d: BASR %02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG)); - printk("scsi%d: bus stuck in data phase -- probably a single byte " - "overrun!\n", HOSTNO); - printk("not prepared for this error!\n"); - printk("please e-mail sammy@sammy.net with a description of how this\n"); - printk("error was produced.\n"); - BUG(); - } - - - - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - transfered = hostdata->dma_len - NCR5380_dma_residual(instance); - hostdata->dma_len = 0; - - data = (unsigned char **) &(hostdata->connected->SCp.ptr); - count = &(hostdata->connected->SCp.this_residual); - *data += transfered; - *count -= transfered; - -} -#endif /* REAL_DMA */ - - -/* - * Function : void NCR5380_intr (int irq) - * - * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses - * from the disconnected queue, and restarting NCR5380_main() - * as required. - * - * Inputs : int irq, irq that caused this interrupt. - * - */ - -static irqreturn_t NCR5380_intr (int irq, void *dev_id) -{ - struct Scsi_Host *instance = first_instance; - int done = 1, handled = 0; - unsigned char basr; - - dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); - - /* Look for pending interrupts */ - basr = NCR5380_read(BUS_AND_STATUS_REG); - dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); - /* dispatch to appropriate routine if found and done=0 */ - if (basr & BASR_IRQ) { - NCR5380_dprint(NDEBUG_INTR, instance); - if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { - done = 0; - dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); - NCR5380_reselect(instance); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } - else if (basr & BASR_PARITY_ERROR) { - dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } - else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } - else { - /* - * The rest of the interrupt conditions can occur only during a - * DMA transfer - */ - -#if defined(REAL_DMA) - /* - * We should only get PHASE MISMATCH and EOP interrupts if we have - * DMA enabled, so do a sanity check based on the current setting - * of the MODE register. 
- */ - - if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) && - ((basr & BASR_END_DMA_TRANSFER) || - !(basr & BASR_PHASE_MATCH))) { - - dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); - NCR5380_dma_complete( instance ); - done = 0; - } else -#endif /* REAL_DMA */ - { -/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ - if (basr & BASR_PHASE_MATCH) - dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, " - "BASR 0x%x, MR 0x%x, SR 0x%x\n", - HOSTNO, basr, NCR5380_read(MODE_REG), - NCR5380_read(STATUS_REG)); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - } - } /* if !(SELECTION || PARITY) */ - handled = 1; - } /* BASR & IRQ */ - else { - - printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, " - "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, - NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - } - - if (!done) { - dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); - /* Put a call to NCR5380_main() on the queue... */ - queue_main(); - } - return IRQ_RETVAL(handled); -} - -/* - * Function : int NCR5380_select(struct Scsi_Host *instance, - * struct scsi_cmnd *cmd) - * - * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, - * including ARBITRATION, SELECTION, and initial message out for - * IDENTIFY and queue messages. - * - * Inputs : instance - instantiation of the 5380 driver on which this - * target lives, cmd - SCSI command to execute. - * - * Returns : -1 if selection could not execute for some reason, - * 0 if selection succeeded or failed because the target - * did not respond. - * - * Side effects : - * If bus busy, arbitration failed, etc, NCR5380_select() will exit - * with registers as they should have been on entry - ie - * SELECT_ENABLE will be set appropriately, the NCR5380 - * will cease to drive any SCSI bus signals. - * - * If successful : I_T_L or I_T_L_Q nexus will be established, - * instance->connected will be set to cmd. - * SELECT interrupt will be disabled. - * - * If failed (no target) : cmd->scsi_done() will be called, and the - * cmd->result host byte set to DID_BAD_TARGET. - */ - -static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) -{ - SETUP_HOSTDATA(instance); - unsigned char tmp[3], phase; - unsigned char *data; - int len; - unsigned long timeout; - unsigned long flags; - - hostdata->restart_select = 0; - NCR5380_dprint(NDEBUG_ARBITRATION, instance); - dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, - instance->this_id); - - /* - * Set the phase bits to 0, otherwise the NCR5380 won't drive the - * data bus during SELECTION. - */ - - local_irq_save(flags); - if (hostdata->connected) { - local_irq_restore(flags); - return -1; - } - NCR5380_write(TARGET_COMMAND_REG, 0); - - - /* - * Start arbitration. 
- */ - - NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); - NCR5380_write(MODE_REG, MR_ARBITRATE); - - local_irq_restore(flags); - - /* Wait for arbitration logic to complete */ -#ifdef NCR_TIMEOUT - { - unsigned long timeout = jiffies + 2*NCR_TIMEOUT; - - while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) - && time_before(jiffies, timeout) && !hostdata->connected) - ; - if (time_after_eq(jiffies, timeout)) - { - printk("scsi : arbitration timeout at %d\n", __LINE__); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; - } - } -#else /* NCR_TIMEOUT */ - while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) - && !hostdata->connected); -#endif - - dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); - - if (hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - return -1; - } - /* - * The arbitration delay is 2.2us, but this is a minimum and there is - * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate - * the integral nature of udelay(). - * - */ - - udelay(3); - - /* Check for lost arbitration */ - if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || - (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", - HOSTNO); - return -1; - } - - /* after/during arbitration, BSY should be asserted. - IBM DPES-31080 Version S31Q works now */ - /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL | - ICR_ASSERT_BSY ) ; - - if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", - HOSTNO); - return -1; - } - - /* - * Again, bus clear + bus settle time is 1.2us, however, this is - * a minimum so we'll udelay ceil(1.2) - */ - -#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY - /* ++roman: But some targets (see above :-) seem to need a bit more... */ - udelay(15); -#else - udelay(2); -#endif - - if (hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - return -1; - } - - dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); - - /* - * Now that we have won arbitration, start Selection process, asserting - * the host and target ID's on the SCSI bus. - */ - - NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id))); - - /* - * Raise ATN while SEL is true before BSY goes false from arbitration, - * since this is the only way to guarantee that we'll get a MESSAGE OUT - * phase immediately after selection. - */ - - NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL )); - NCR5380_write(MODE_REG, MR_BASE); - - /* - * Reselect interrupts must be turned off prior to the dropping of BSY, - * otherwise we will trigger an interrupt. - */ - - if (hostdata->connected) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - return -1; - } - - NCR5380_write(SELECT_ENABLE_REG, 0); - - /* - * The initiator shall then wait at least two deskew delays and release - * the BSY signal. 
- */ - udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ - - /* Reset BSY */ - NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | - ICR_ASSERT_ATN | ICR_ASSERT_SEL)); - - /* - * Something weird happens when we cease to drive BSY - looks - * like the board/chip is letting us do another read before the - * appropriate propagation delay has expired, and we're confusing - * a BSY signal from ourselves as the target's response to SELECTION. - * - * A small delay (the 'C++' frontend breaks the pipeline with an - * unnecessary jump, making it work on my 386-33/Trantor T128, the - * tighter 'C' code breaks and requires this) solves the problem - - * the 1 us delay is arbitrary, and only used because this delay will - * be the same on other platforms and since it works here, it should - * work there. - * - * wingel suggests that this could be due to failing to wait - * one deskew delay. - */ - - udelay(1); - - dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); - - /* - * The SCSI specification calls for a 250 ms timeout for the actual - * selection. - */ - - timeout = jiffies + 25; - - /* - * XXX very interesting - we're seeing a bounce where the BSY we - * asserted is being reflected / still asserted (propagation delay?) - * and it's detecting as true. Sigh. - */ - -#if 0 - /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert - * IO while SEL is true. But again, there are some disks out the in the - * world that do that nevertheless. (Somebody claimed that this announces - * reselection capability of the target.) So we better skip that test and - * only wait for BSY... (Famous german words: Der Klügere gibt nach :-) - */ - - while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & - (SR_BSY | SR_IO))); - - if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == - (SR_SEL | SR_IO)) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_reselect(instance); - printk (KERN_ERR "scsi%d: reselection after won arbitration?\n", - HOSTNO); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; - } -#else - while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY)); -#endif - - /* - * No less than two deskew delays after the initiator detects the - * BSY signal is true, it shall release the SEL signal and may - * change the DATA BUS. -wingel - */ - - udelay(1); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - - if (!(NCR5380_read(STATUS_REG) & SR_BSY)) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - if (hostdata->targets_present & (1 << cmd->device->id)) { - printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); - if (hostdata->restart_select) - printk(KERN_NOTICE "\trestart select\n"); - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; - } - cmd->result = DID_BAD_TARGET << 16; -#ifdef SUPPORT_TAGS - cmd_free_tag( cmd ); -#endif - cmd->scsi_done(cmd); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return 0; - } - - hostdata->targets_present |= (1 << cmd->device->id); - - /* - * Since we followed the SCSI spec, and raised ATN while SEL - * was true but before BSY was false during selection, the information - * transfer phase should be a MESSAGE OUT phase so that we can send the - * IDENTIFY message. 
- * - * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG - * message (2 bytes) with a tag ID that we increment with every command - * until it wraps back to 0. - * - * XXX - it turns out that there are some broken SCSI-II devices, - * which claim to support tagged queuing but fail when more than - * some number of commands are issued at once. - */ - - /* Wait for start of REQ/ACK handshake */ - while (!(NCR5380_read(STATUS_REG) & SR_REQ)); - - dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", - HOSTNO, cmd->device->id); - tmp[0] = IDENTIFY(1, cmd->device->lun); - -#ifdef SUPPORT_TAGS - if (cmd->tag != TAG_NONE) { - tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; - tmp[2] = cmd->tag; - len = 3; - } else - len = 1; -#else - len = 1; - cmd->tag=0; -#endif /* SUPPORT_TAGS */ - - /* Send message(s) */ - data = tmp; - phase = PHASE_MSGOUT; - NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); - /* XXX need to handle errors here */ - hostdata->connected = cmd; -#ifndef SUPPORT_TAGS - hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); -#endif -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - initialize_SCp(cmd); - - - return 0; -} - -/* - * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) - * - * Purpose : transfers data in given phase using polled I/O - * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. - * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes are transferred or exit - * is in same phase. - * - * Also, *phase, *count, *data are modified in place. - * - * XXX Note : handling for bus free may be useful. - */ - -/* - * Note : this code is not as quick as it could be, however it - * IS 100% reliable, and for the actual data transfer where speed - * counts, we will always do a pseudo DMA or DMA transfer. - */ - -static int NCR5380_transfer_pio( struct Scsi_Host *instance, - unsigned char *phase, int *count, - unsigned char **data) -{ - register unsigned char p = *phase, tmp; - register int c = *count; - register unsigned char *d = *data; - - /* - * The NCR5380 chip will only drive the SCSI bus when the - * phase specified in the appropriate bits of the TARGET COMMAND - * REGISTER match the STATUS REGISTER - */ - - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); - - do { - /* - * Wait for assertion of REQ, after which the phase bits will be - * valid - */ - while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); - - dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); - - /* Check for phase mismatch */ - if ((tmp & PHASE_MASK) != p) { - dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); - NCR5380_dprint_phase(NDEBUG_PIO, instance); - break; - } - - /* Do actual transfer from SCSI bus to / from memory */ - if (!(p & SR_IO)) - NCR5380_write(OUTPUT_DATA_REG, *d); - else - *d = NCR5380_read(CURRENT_SCSI_DATA_REG); - - ++d; - - /* - * The SCSI standard suggests that in MSGOUT phase, the initiator - * should drop ATN on the last byte of the message phase - * after REQ has been asserted for the handshake but before - * the initiator raises ACK. 
- */ - - if (!(p & SR_IO)) { - if (!((p & SR_MSG) && c > 1)) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA); - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ACK); - } else { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ATN); - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); - } - } else { - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); - } - - while (NCR5380_read(STATUS_REG) & SR_REQ); - - dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); - -/* - * We have several special cases to consider during REQ/ACK handshaking : - * 1. We were in MSGOUT phase, and we are on the last byte of the - * message. ATN must be dropped as ACK is dropped. - * - * 2. We are in a MSGIN phase, and we are on the last byte of the - * message. We must exit with ACK asserted, so that the calling - * code may raise ATN before dropping ACK to reject the message. - * - * 3. ACK and ATN are clear and the target may proceed as normal. - */ - if (!(p == PHASE_MSGIN && c == 1)) { - if (p == PHASE_MSGOUT && c > 1) - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - else - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - } - } while (--c); - - dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); - - *count = c; - *data = d; - tmp = NCR5380_read(STATUS_REG); - /* The phase read from the bus is valid if either REQ is (already) - * asserted or if ACK hasn't been released yet. The latter is the case if - * we're in MSGIN and all wanted bytes have been received. */ - if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0)) - *phase = tmp & PHASE_MASK; - else - *phase = PHASE_UNKNOWN; - - if (!c || (*phase == p)) - return 0; - else - return -1; -} - -/* - * Function : do_abort (Scsi_Host *host) - * - * Purpose : abort the currently established nexus. Should only be - * called from a routine which can drop into a - * - * Returns : 0 on success, -1 on failure. - */ - -static int do_abort (struct Scsi_Host *host) -{ - unsigned char tmp, *msgptr, phase; - int len; - - /* Request message out phase */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - - /* - * Wait for the target to indicate a valid phase by asserting - * REQ. Once this happens, we'll have either a MSGOUT phase - * and can immediately send the ABORT message, or we'll have some - * other phase and will have to source/sink data. - * - * We really don't care what value was on the bus or what value - * the target sees, so we just handshake. - */ - - while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); - - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - - if ((tmp & PHASE_MASK) != PHASE_MSGOUT) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | - ICR_ASSERT_ACK); - while (NCR5380_read(STATUS_REG) & SR_REQ); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - } - - tmp = ABORT; - msgptr = &tmp; - len = 1; - phase = PHASE_MSGOUT; - NCR5380_transfer_pio (host, &phase, &len, &msgptr); - - /* - * If we got here, and the command completed successfully, - * we're about to go into bus free state. - */ - - return len ? 
-1 : 0; -} - -#if defined(REAL_DMA) -/* - * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) - * - * Purpose : transfers data in given phase using either real - * or pseudo DMA. - * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. - * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes or transferred or exit - * is in same phase. - * - * Also, *phase, *count, *data are modified in place. - * - */ - - -static int NCR5380_transfer_dma( struct Scsi_Host *instance, - unsigned char *phase, int *count, - unsigned char **data) -{ - SETUP_HOSTDATA(instance); - register int c = *count; - register unsigned char p = *phase; - unsigned long flags; - - /* sanity check */ - if(!sun3_dma_setup_done) { - printk("scsi%d: transfer_dma without setup!\n", HOSTNO); - BUG(); - } - hostdata->dma_len = c; - - dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", - HOSTNO, (p & SR_IO) ? "reading" : "writing", - c, (p & SR_IO) ? "to" : "from", *data); - - /* netbsd turns off ints here, why not be safe and do it too */ - local_irq_save(flags); - - /* send start chain */ - sun3scsi_dma_start(c, *data); - - if (p & SR_IO) { - NCR5380_write(TARGET_COMMAND_REG, 1); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - NCR5380_write(INITIATOR_COMMAND_REG, 0); - NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR)); - NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); - } else { - NCR5380_write(TARGET_COMMAND_REG, 0); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA); - NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR)); - NCR5380_write(START_DMA_SEND_REG, 0); - } - -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - - local_irq_restore(flags); - - sun3_dma_active = 1; - return 0; -} -#endif /* defined(REAL_DMA) */ - -/* - * Function : NCR5380_information_transfer (struct Scsi_Host *instance) - * - * Purpose : run through the various SCSI phases and do as the target - * directs us to. Operates on the currently connected command, - * instance->connected. - * - * Inputs : instance, instance for which we are doing commands - * - * Side effects : SCSI things happen, the disconnected queue will be - * modified if a command disconnects, *instance->connected will - * change. - * - * XXX Note : we need to watch for bus free or a reset condition here - * to recover from an unexpected bus free condition. 
- */ - -static void NCR5380_information_transfer (struct Scsi_Host *instance) -{ - SETUP_HOSTDATA(instance); - unsigned long flags; - unsigned char msgout = NOP; - int sink = 0; - int len; -#if defined(REAL_DMA) - int transfersize; -#endif - unsigned char *data; - unsigned char phase, tmp, extended_msg[10], old_phase=0xff; - struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected; - -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - - while (1) { - tmp = NCR5380_read(STATUS_REG); - /* We only have a valid SCSI phase when REQ is asserted */ - if (tmp & SR_REQ) { - phase = (tmp & PHASE_MASK); - if (phase != old_phase) { - old_phase = phase; - NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); - } - - if(phase == PHASE_CMDOUT) { - void *d; - unsigned long count; - - if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { - count = cmd->SCp.buffer->length; - d = SGADDR(cmd->SCp.buffer); - } else { - count = cmd->SCp.this_residual; - d = cmd->SCp.ptr; - } -#ifdef REAL_DMA - /* this command setup for dma yet? */ - if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done - != cmd)) - { - if (cmd->request->cmd_type == REQ_TYPE_FS) { - sun3scsi_dma_setup(d, count, - rq_data_dir(cmd->request)); - sun3_dma_setup_done = cmd; - } - } -#endif -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - } - - - if (sink && (phase != PHASE_MSGOUT)) { - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | - ICR_ASSERT_ACK); - while (NCR5380_read(STATUS_REG) & SR_REQ); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); - sink = 0; - continue; - } - - switch (phase) { - case PHASE_DATAOUT: -#if (NDEBUG & NDEBUG_NO_DATAOUT) - printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " - "aborted\n", HOSTNO); - sink = 1; - do_abort(instance); - cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); - return; -#endif - case PHASE_DATAIN: - /* - * If there is no room left in the current buffer in the - * scatter-gather list, move onto the next one. - */ - if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { - ++cmd->SCp.buffer; - --cmd->SCp.buffers_residual; - cmd->SCp.this_residual = cmd->SCp.buffer->length; - cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); - dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", - HOSTNO, cmd->SCp.this_residual, - cmd->SCp.buffers_residual); - } - - /* - * The preferred transfer method is going to be - * PSEUDO-DMA for systems that are strictly PIO, - * since we can let the hardware do the handshaking. - * - * For this to work, we need to know the transfersize - * ahead of time, since the pseudo-DMA code will sit - * in an unconditional loop. - */ - -/* ++roman: I suggest, this should be - * #if def(REAL_DMA) - * instead of leaving REAL_DMA out. - */ - -#if defined(REAL_DMA) -// if (!cmd->device->borken && - if((transfersize = - NCR5380_dma_xfer_len(instance,cmd,phase)) > SUN3_DMA_MINSIZE) { - len = transfersize; - cmd->SCp.phase = phase; - - if (NCR5380_transfer_dma(instance, &phase, - &len, (unsigned char **) &cmd->SCp.ptr)) { - /* - * If the watchdog timer fires, all future - * accesses to this device will use the - * polled-IO. 
*/ - printk(KERN_NOTICE "scsi%d: switching target %d " - "lun %llu to slow handshake\n", HOSTNO, - cmd->device->id, cmd->device->lun); - cmd->device->borken = 1; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); - sink = 1; - do_abort(instance); - cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); - /* XXX - need to source or sink data here, as appropriate */ - } else { -#ifdef REAL_DMA - /* ++roman: When using real DMA, - * information_transfer() should return after - * starting DMA since it has nothing more to - * do. - */ - return; -#else - cmd->SCp.this_residual -= transfersize - len; -#endif - } - } else -#endif /* defined(REAL_DMA) */ - NCR5380_transfer_pio(instance, &phase, - (int *) &cmd->SCp.this_residual, (unsigned char **) - &cmd->SCp.ptr); -#ifdef REAL_DMA - /* if we had intended to dma that command clear it */ - if(sun3_dma_setup_done == cmd) - sun3_dma_setup_done = NULL; -#endif - - break; - case PHASE_MSGIN: - len = 1; - data = &tmp; - NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ - NCR5380_transfer_pio(instance, &phase, &len, &data); - cmd->SCp.Message = tmp; - - switch (tmp) { - /* - * Linking lets us reduce the time required to get the - * next command out to the device, hopefully this will - * mean we don't waste another revolution due to the delays - * required by ARBITRATION and another SELECTION. - * - * In the current implementation proposal, low level drivers - * merely have to start the next command, pointed to by - * next_link, done() is called as with unlinked commands. - */ -#ifdef LINKED - case LINKED_CMD_COMPLETE: - case LINKED_FLG_CMD_COMPLETE: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command " - "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); - - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* - * Sanity check : A linked command should only terminate - * with one of these messages if there are more linked - * commands available. - */ - - if (!cmd->next_link) { - printk(KERN_NOTICE "scsi%d: target %d lun %llu " - "linked command complete, no next_link\n", - HOSTNO, cmd->device->id, cmd->device->lun); - sink = 1; - do_abort (instance); - return; - } - - initialize_SCp(cmd->next_link); - /* The next command is still part of this process; copy it - * and don't free it! */ - cmd->next_link->tag = cmd->tag; - cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " - "done, calling scsi_done().\n", - HOSTNO, cmd->device->id, cmd->device->lun); - cmd->scsi_done(cmd); - cmd = hostdata->connected; - break; -#endif /* def LINKED */ - case ABORT: - case COMMAND_COMPLETE: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - hostdata->connected = NULL; - dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " - "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); -#ifdef SUPPORT_TAGS - cmd_free_tag( cmd ); - if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { - /* Turn a QUEUE FULL status into BUSY, I think the - * mid level cannot handle QUEUE FULL :-( (The - * command is retried after BUSY). Also update our - * queue size to the number of currently issued - * commands now. - */ - /* ++Andreas: the mid level code knows about - QUEUE_FULL now. 
*/ - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " - "QUEUE_FULL after %d commands\n", - HOSTNO, cmd->device->id, cmd->device->lun, - ta->nr_allocated); - if (ta->queue_size > ta->nr_allocated) - ta->nr_allocated = ta->queue_size; - } -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - - /* - * I'm not sure what the correct thing to do here is : - * - * If the command that just executed is NOT a request - * sense, the obvious thing to do is to set the result - * code to the values of the stored parameters. - * - * If it was a REQUEST SENSE command, we need some way to - * differentiate between the failure code of the original - * and the failure code of the REQUEST sense - the obvious - * case is success, where we fall through and leave the - * result code unchanged. - * - * The non-obvious place is where the REQUEST SENSE failed - */ - - if (cmd->cmnd[0] != REQUEST_SENSE) - cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - else if (status_byte(cmd->SCp.Status) != GOOD) - cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); - - if ((cmd->cmnd[0] == REQUEST_SENSE) && - hostdata->ses.cmd_len) { - scsi_eh_restore_cmnd(cmd, &hostdata->ses); - hostdata->ses.cmd_len = 0 ; - } - - if ((cmd->cmnd[0] != REQUEST_SENSE) && - (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { - scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", - HOSTNO); - /* this is initialized from initialize_SCp - cmd->SCp.buffer = NULL; - cmd->SCp.buffers_residual = 0; - */ - - local_irq_save(flags); - LIST(cmd,hostdata->issue_queue); - SET_NEXT(cmd, hostdata->issue_queue); - hostdata->issue_queue = (struct scsi_cmnd *) cmd; - local_irq_restore(flags); - dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " - "issue queue\n", H_NO(cmd)); - } else { - cmd->scsi_done(cmd); - } - - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* - * Restore phase bits to 0 so an interrupted selection, - * arbitration can resume. - */ - NCR5380_write(TARGET_COMMAND_REG, 0); - - while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) - barrier(); - - return; - case MESSAGE_REJECT: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - switch (hostdata->last_message) { - case HEAD_OF_QUEUE_TAG: - case ORDERED_QUEUE_TAG: - case SIMPLE_QUEUE_TAG: - /* The target obviously doesn't support tagged - * queuing, even though it announced this ability in - * its INQUIRY data ?!? (maybe only this LUN?) Ok, - * clear 'tagged_supported' and lock the LUN, since - * the command is treated as untagged further on. 
- */ - cmd->device->tagged_supported = 0; - hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); - cmd->tag = TAG_NONE; - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected " - "QUEUE_TAG message; tagged queuing " - "disabled\n", - HOSTNO, cmd->device->id, cmd->device->lun); - break; - } - break; - case DISCONNECT: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - local_irq_save(flags); - cmd->device->disconnect = 1; - LIST(cmd,hostdata->disconnected_queue); - SET_NEXT(cmd, hostdata->disconnected_queue); - hostdata->connected = NULL; - hostdata->disconnected_queue = cmd; - local_irq_restore(flags); - dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was " - "moved from connected to the " - "disconnected_queue\n", HOSTNO, - cmd->device->id, cmd->device->lun); - /* - * Restore phase bits to 0 so an interrupted selection, - * arbitration can resume. - */ - NCR5380_write(TARGET_COMMAND_REG, 0); - - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* Wait for bus free to avoid nasty timeouts */ - while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) - barrier(); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - return; - /* - * The SCSI data pointer is *IMPLICITLY* saved on a disconnect - * operation, in violation of the SCSI spec so we can safely - * ignore SAVE/RESTORE pointers calls. - * - * Unfortunately, some disks violate the SCSI spec and - * don't issue the required SAVE_POINTERS message before - * disconnecting, and we have to break spec to remain - * compatible. - */ - case SAVE_POINTERS: - case RESTORE_POINTERS: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - break; - case EXTENDED_MESSAGE: -/* - * Extended messages are sent in the following format : - * Byte - * 0 EXTENDED_MESSAGE == 1 - * 1 length (includes one byte for code, doesn't - * include first two bytes) - * 2 code - * 3..length+1 arguments - * - * Start the extended message buffer with the EXTENDED_MESSAGE - * byte, since spi_print_msg() wants the whole thing. 
- */ - extended_msg[0] = EXTENDED_MESSAGE; - /* Accept first byte by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); - - len = 2; - data = extended_msg + 1; - phase = PHASE_MSGIN; - NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, - (int)extended_msg[1], (int)extended_msg[2]); - - if (!len && extended_msg[1] <= - (sizeof (extended_msg) - 1)) { - /* Accept third byte by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - len = extended_msg[1] - 1; - data = extended_msg + 3; - phase = PHASE_MSGIN; - - NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n", - HOSTNO, len); - - switch (extended_msg[2]) { - case EXTENDED_SDTR: - case EXTENDED_WDTR: - case EXTENDED_MODIFY_DATA_POINTER: - case EXTENDED_EXTENDED_IDENTIFY: - tmp = 0; - } - } else if (len) { - printk(KERN_NOTICE "scsi%d: error receiving " - "extended message\n", HOSTNO); - tmp = 0; - } else { - printk(KERN_NOTICE "scsi%d: extended message " - "code %02x length %d is too long\n", - HOSTNO, extended_msg[2], extended_msg[1]); - tmp = 0; - } - /* Fall through to reject message */ - - /* - * If we get something weird that we aren't expecting, - * reject it. - */ - default: - if (!tmp) { - printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); - spi_print_msg(extended_msg); - printk("\n"); - } else if (tmp != EXTENDED_MESSAGE) - printk(KERN_DEBUG "scsi%d: rejecting unknown " - "message %02x from target %d, lun %llu\n", - HOSTNO, tmp, cmd->device->id, cmd->device->lun); - else - printk(KERN_DEBUG "scsi%d: rejecting unknown " - "extended message " - "code %02x, length %d from target %d, lun %llu\n", - HOSTNO, extended_msg[1], extended_msg[0], - cmd->device->id, cmd->device->lun); - - - msgout = MESSAGE_REJECT; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); - break; - } /* switch (tmp) */ - break; - case PHASE_MSGOUT: - len = 1; - data = &msgout; - hostdata->last_message = msgout; - NCR5380_transfer_pio(instance, &phase, &len, &data); - if (msgout == ABORT) { -#ifdef SUPPORT_TAGS - cmd_free_tag( cmd ); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - hostdata->connected = NULL; - cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return; - } - msgout = NOP; - break; - case PHASE_CMDOUT: - len = cmd->cmd_len; - data = cmd->cmnd; - /* - * XXX for performance reasons, on machines with a - * PSEUDO-DMA architecture we should probably - * use the dma transfer function. - */ - NCR5380_transfer_pio(instance, &phase, &len, - &data); - break; - case PHASE_STATIN: - len = 1; - data = &tmp; - NCR5380_transfer_pio(instance, &phase, &len, &data); - cmd->SCp.Status = tmp; - break; - default: - printk("scsi%d: unknown phase\n", HOSTNO); - NCR5380_dprint(NDEBUG_ANY, instance); - } /* switch(phase) */ - } /* if (tmp * SR_REQ) */ - } /* while (1) */ -} - -/* - * Function : void NCR5380_reselect (struct Scsi_Host *instance) - * - * Purpose : does reselection, initializing the instance->connected - * field to point to the struct scsi_cmnd for which the I_T_L or I_T_L_Q - * nexus has been reestablished, - * - * Inputs : instance - this instance of the NCR5380. 
- * - */ - -/* it might eventually prove necessary to do a dma setup on - reselection, but it doesn't seem to be needed now -- sam */ - -static void NCR5380_reselect (struct Scsi_Host *instance) -{ - SETUP_HOSTDATA(instance); - unsigned char target_mask; - unsigned char lun; -#ifdef SUPPORT_TAGS - unsigned char tag; -#endif - unsigned char msg[3]; - struct scsi_cmnd *tmp = NULL, *prev; -/* unsigned long flags; */ - - /* - * Disable arbitration, etc. since the host adapter obviously - * lost, and tell an interrupted NCR5380_select() to restart. - */ - - NCR5380_write(MODE_REG, MR_BASE); - hostdata->restart_select = 1; - - target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - - dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); - - /* - * At this point, we have detected that our SCSI ID is on the bus, - * SEL is true and BSY was false for at least one bus settle delay - * (400 ns). - * - * We must assert BSY ourselves, until the target drops the SEL - * signal. - */ - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); - - while (NCR5380_read(STATUS_REG) & SR_SEL); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - /* - * Wait for target to go into MSGIN. - */ - - while (!(NCR5380_read(STATUS_REG) & SR_REQ)); - -#if 1 - // acknowledge toggle to MSGIN - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); - - // peek at the byte without really hitting the bus - msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); -#endif - - if (!(msg[0] & 0x80)) { - printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); - spi_print_msg(msg); - do_abort(instance); - return; - } - lun = (msg[0] & 0x07); - - /* - * Find the command corresponding to the I_T_L or I_T_L_Q nexus we - * just reestablished, and remove it from the disconnected queue. - */ - - for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; - tmp; prev = tmp, tmp = NEXT(tmp) ) { - if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) -#ifdef SUPPORT_TAGS - && (tag == tmp->tag) -#endif - ) { - if (prev) { - REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); - SET_NEXT(prev, NEXT(tmp)); - } else { - REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); - hostdata->disconnected_queue = NEXT(tmp); - } - SET_NEXT(tmp, NULL); - break; - } - } - - if (!tmp) { - printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d " -#ifdef SUPPORT_TAGS - "tag %d " -#endif - "not in disconnected_queue.\n", - HOSTNO, target_mask, lun -#ifdef SUPPORT_TAGS - , tag -#endif - ); - /* - * Since we have an established nexus that we can't do anything - * with, we must abort it. - */ - do_abort(instance); - return; - } -#if 1 - /* engage dma setup for the command we just saw */ - { - void *d; - unsigned long count; - - if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { - count = tmp->SCp.buffer->length; - d = SGADDR(tmp->SCp.buffer); - } else { - count = tmp->SCp.this_residual; - d = tmp->SCp.ptr; - } -#ifdef REAL_DMA - /* setup this command for dma if not already */ - if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != tmp)) - { - sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request)); - sun3_dma_setup_done = tmp; - } -#endif - } -#endif - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - -#ifdef SUPPORT_TAGS - /* If the phase is still MSGIN, the target wants to send some more - * messages. 
In case it supports tagged queuing, this is probably a - * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. - */ - tag = TAG_NONE; - if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { - /* Accept previous IDENTIFY message by clearing ACK */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); - len = 2; - data = msg+1; - if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && - msg[1] == SIMPLE_QUEUE_TAG) - tag = msg[2]; - dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " - "reselection\n", HOSTNO, target_mask, lun, tag); - } -#endif - - hostdata->connected = tmp; - dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", - HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); -} - - -/* - * Function : int NCR5380_abort(struct scsi_cmnd *cmd) - * - * Purpose : abort a command - * - * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the - * host byte of the result field to, if zero DID_ABORTED is - * used. - * - * Returns : SUCCESS - success, FAILED on failure. - * - * XXX - there is no way to abort the command that is currently - * connected, you have to wait for it to complete. If this is - * a problem, we could implement longjmp() / setjmp(), setjmp() - * called where the loop started in NCR5380_main(). - */ - -static int NCR5380_abort(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *instance = cmd->device->host; - SETUP_HOSTDATA(instance); - struct scsi_cmnd *tmp, **prev; - unsigned long flags; - - scmd_printk(KERN_NOTICE, cmd, "aborting command\n"); - - NCR5380_print_status (instance); - - local_irq_save(flags); - - dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, - NCR5380_read(BUS_AND_STATUS_REG), - NCR5380_read(STATUS_REG)); - -#if 1 -/* - * Case 1 : If the command is the currently executing command, - * we'll set the aborted flag and return control so that - * information transfer routine can exit cleanly. - */ - - if (hostdata->connected == cmd) { - - dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO); -/* - * We should perform BSY checking, and make sure we haven't slipped - * into BUS FREE. - */ - -/* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */ -/* - * Since we can't change phases until we've completed the current - * handshake, we have to source or sink a byte of data if the current - * phase is not MSGOUT. - */ - -/* - * Return control to the executing NCR drive so we can clear the - * aborted flag and get back into our main loop. - */ - - if (do_abort(instance) == 0) { - hostdata->aborted = 1; - hostdata->connected = NULL; - cmd->result = DID_ABORT << 16; -#ifdef SUPPORT_TAGS - cmd_free_tag( cmd ); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - local_irq_restore(flags); - cmd->scsi_done(cmd); - return SUCCESS; - } else { -/* local_irq_restore(flags); */ - printk("scsi%d: abort of connected command failed!\n", HOSTNO); - return FAILED; - } - } -#endif - -/* - * Case 2 : If the command hasn't been issued yet, we simply remove it - * from the issue queue. 
- */ - for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), - tmp = (struct scsi_cmnd *) hostdata->issue_queue; - tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) - if (cmd == tmp) { - REMOVE(5, *prev, tmp, NEXT(tmp)); - (*prev) = NEXT(tmp); - SET_NEXT(tmp, NULL); - tmp->result = DID_ABORT << 16; - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", - HOSTNO); - /* Tagged queuing note: no tag to free here, hasn't been assigned - * yet... */ - tmp->scsi_done(tmp); - return SUCCESS; - } - -/* - * Case 3 : If any commands are connected, we're going to fail the abort - * and let the high level SCSI driver retry at a later time or - * issue a reset. - * - * Timeouts, and therefore aborted commands, will be highly unlikely - * and handling them cleanly in this situation would make the common - * case of noresets less efficient, and would pollute our code. So, - * we fail. - */ - - if (hostdata->connected) { - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); - return FAILED; - } - -/* - * Case 4: If the command is currently disconnected from the bus, and - * there are no connected commands, we reconnect the I_T_L or - * I_T_L_Q nexus associated with it, go into message out, and send - * an abort message. - * - * This case is especially ugly. In order to reestablish the nexus, we - * need to call NCR5380_select(). The easiest way to implement this - * function was to abort if the bus was busy, and let the interrupt - * handler triggered on the SEL for reselect take care of lost arbitrations - * where necessary, meaning interrupts need to be enabled. - * - * When interrupts are enabled, the queues may change - so we - * can't remove it from the disconnected queue before selecting it - * because that could cause a failure in hashing the nexus if that - * device reselected. - * - * Since the queues may change, we can't use the pointers from when we - * first locate it. - * - * So, we must first locate the command, and if NCR5380_select() - * succeeds, then issue the abort, relocate the command and remove - * it from the disconnected queue. - */ - - for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; - tmp = NEXT(tmp)) - if (cmd == tmp) { - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); - - if (NCR5380_select(instance, cmd)) - return FAILED; - - dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); - - do_abort (instance); - - local_irq_save(flags); - for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), - tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; - tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) ) - if (cmd == tmp) { - REMOVE(5, *prev, tmp, NEXT(tmp)); - *prev = NEXT(tmp); - SET_NEXT(tmp, NULL); - tmp->result = DID_ABORT << 16; - /* We must unlock the tag/LUN immediately here, since the - * target goes to BUS FREE and doesn't send us another - * message (COMMAND_COMPLETE or the like) - */ -#ifdef SUPPORT_TAGS - cmd_free_tag( tmp ); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - local_irq_restore(flags); - tmp->scsi_done(tmp); - return SUCCESS; - } - } - -/* - * Case 5 : If we reached this point, the command was not found in any of - * the queues. 
- * - * We probably reached this point because of an unlikely race condition - * between the command completing successfully and the abortion code, - * so we won't panic, but we will notify the user in case something really - * broke. - */ - - local_irq_restore(flags); - printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); - - return FAILED; -} - - -/* - * Function : int NCR5380_bus_reset(struct scsi_cmnd *cmd) - * - * Purpose : reset the SCSI bus. - * - * Returns : SUCCESS or FAILURE - * - */ - -static int NCR5380_bus_reset(struct scsi_cmnd *cmd) -{ - SETUP_HOSTDATA(cmd->device->host); - int i; - unsigned long flags; -#if defined(RESET_RUN_DONE) - struct scsi_cmnd *connected, *disconnected_queue; -#endif - - - NCR5380_print_status (cmd->device->host); - - /* get in phase */ - NCR5380_write( TARGET_COMMAND_REG, - PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); - /* assert RST */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); - udelay (40); - /* reset NCR registers */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); - NCR5380_write( MODE_REG, MR_BASE ); - NCR5380_write( TARGET_COMMAND_REG, 0 ); - NCR5380_write( SELECT_ENABLE_REG, 0 ); - /* ++roman: reset interrupt condition! otherwise no interrupts don't get - * through anymore ... */ - (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); - - /* MSch 20140115 - looking at the generic NCR5380 driver, all of this - * should go. - * Catch-22: if we don't clear all queues, the SCSI driver lock will - * not be released by atari_scsi_reset()! - */ - -#if defined(RESET_RUN_DONE) - /* XXX Should now be done by midlevel code, but it's broken XXX */ - /* XXX see below XXX */ - - /* MSch: old-style reset: actually abort all command processing here */ - - /* After the reset, there are no more connected or disconnected commands - * and no busy units; to avoid problems with re-inserting the commands - * into the issue_queue (via scsi_done()), the aborted commands are - * remembered in local variables first. - */ - local_irq_save(flags); - connected = (struct scsi_cmnd *)hostdata->connected; - hostdata->connected = NULL; - disconnected_queue = (struct scsi_cmnd *)hostdata->disconnected_queue; - hostdata->disconnected_queue = NULL; -#ifdef SUPPORT_TAGS - free_all_tags(); -#endif - for( i = 0; i < 8; ++i ) - hostdata->busy[i] = 0; -#ifdef REAL_DMA - hostdata->dma_len = 0; -#endif - local_irq_restore(flags); - - /* In order to tell the mid-level code which commands were aborted, - * set the command status to DID_RESET and call scsi_done() !!! - * This ultimately aborts processing of these commands in the mid-level. - */ - - if ((cmd = connected)) { - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); - cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); - cmd->scsi_done( cmd ); - } - - for (i = 0; (cmd = disconnected_queue); ++i) { - disconnected_queue = NEXT(cmd); - SET_NEXT(cmd, NULL); - cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); - cmd->scsi_done( cmd ); - } - if (i > 0) - dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i); - - - /* since all commands have been explicitly terminated, we need to tell - * the midlevel code that the reset was SUCCESSFUL, and there is no - * need to 'wake up' the commands by a request_sense - */ - return SUCCESS; -#else /* 1 */ - - /* MSch: new-style reset handling: let the mid-level do what it can */ - - /* ++guenther: MID-LEVEL IS STILL BROKEN. 
- * Mid-level is supposed to requeue all commands that were active on the - * various low-level queues. In fact it does this, but that's not enough - * because all these commands are subject to timeout. And if a timeout - * happens for any removed command, *_abort() is called but all queues - * are now empty. Abort then gives up the falcon lock, which is fatal, - * since the mid-level will queue more commands and must have the lock - * (it's all happening inside timer interrupt handler!!). - * Even worse, abort will return NOT_RUNNING for all those commands not - * on any queue, so they won't be retried ... - * - * Conclusion: either scsi.c disables timeout for all resetted commands - * immediately, or we lose! As of linux-2.0.20 it doesn't. - */ - - /* After the reset, there are no more connected or disconnected commands - * and no busy units; so clear the low-level status here to avoid - * conflicts when the mid-level code tries to wake up the affected - * commands! - */ - - if (hostdata->issue_queue) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); - if (hostdata->connected) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); - if (hostdata->disconnected_queue) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); - - local_irq_save(flags); - hostdata->issue_queue = NULL; - hostdata->connected = NULL; - hostdata->disconnected_queue = NULL; -#ifdef SUPPORT_TAGS - free_all_tags(); -#endif - for( i = 0; i < 8; ++i ) - hostdata->busy[i] = 0; -#ifdef REAL_DMA - hostdata->dma_len = 0; -#endif - local_irq_restore(flags); - - /* we did no complete reset of all commands, so a wakeup is required */ - return SUCCESS; -#endif /* 1 */ -} - -/* Local Variables: */ -/* tab-width: 8 */ -/* End: */ diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 446c8e58a8db..985a6b36756e 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -41,6 +41,8 @@ #define REAL_DMA #define RESET_RUN_DONE /* #define SUPPORT_TAGS */ +/* minimum number of bytes to do dma on */ +#define DMA_MIN_SIZE 129 /* #define MAX_TAGS 32 */ @@ -64,6 +66,9 @@ #define NCR5380_dma_xfer_len(instance, cmd, phase) \ sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO)) +#define NCR5380_acquire_dma_irq(instance) (1) +#define NCR5380_release_dma_irq(instance) + #include "NCR5380.h" @@ -92,9 +97,6 @@ module_param(setup_hostid, int, 0); /* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ #define SUN3_DVMA_BUFSIZE 0xe000 -/* minimum number of bytes to do dma on */ -#define SUN3_DMA_MINSIZE 128 - static struct scsi_cmnd *sun3_dma_setup_done; static unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; @@ -486,7 +488,7 @@ static int sun3scsi_dma_finish(int write_flag) } -#include "sun3_NCR5380.c" +#include "atari_NCR5380.c" #ifdef SUN3_SCSI_VME #define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" -- cgit v1.2.3-59-g8ed1b From 003f7de6258900e17f6206e8e417d76c75ca549f Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 19 Nov 2014 16:22:52 +0100 Subject: KVM: ia64: remove KVM for ia64 has been marked as broken not just once, but twice even, and the last patch from the maintainer is now roughly 5 years old. Time for it to rest in peace. 
Acked-by: Gleb Natapov Signed-off-by: Paolo Bonzini --- MAINTAINERS | 9 - arch/ia64/Kconfig | 3 - arch/ia64/Makefile | 1 - arch/ia64/include/asm/kvm_host.h | 609 ---------- arch/ia64/include/asm/pvclock-abi.h | 48 - arch/ia64/include/uapi/asm/kvm.h | 268 ----- arch/ia64/kvm/Kconfig | 66 -- arch/ia64/kvm/Makefile | 67 -- arch/ia64/kvm/asm-offsets.c | 241 ---- arch/ia64/kvm/irq.h | 33 - arch/ia64/kvm/kvm-ia64.c | 1942 ------------------------------ arch/ia64/kvm/kvm_fw.c | 674 ----------- arch/ia64/kvm/kvm_lib.c | 21 - arch/ia64/kvm/kvm_minstate.h | 266 ----- arch/ia64/kvm/lapic.h | 30 - arch/ia64/kvm/memcpy.S | 1 - arch/ia64/kvm/memset.S | 1 - arch/ia64/kvm/misc.h | 94 -- arch/ia64/kvm/mmio.c | 336 ------ arch/ia64/kvm/optvfault.S | 1090 ----------------- arch/ia64/kvm/process.c | 1024 ---------------- arch/ia64/kvm/trampoline.S | 1038 ---------------- arch/ia64/kvm/vcpu.c | 2209 ----------------------------------- arch/ia64/kvm/vcpu.h | 752 ------------ arch/ia64/kvm/vmm.c | 99 -- arch/ia64/kvm/vmm_ivt.S | 1392 ---------------------- arch/ia64/kvm/vti.h | 290 ----- arch/ia64/kvm/vtlb.c | 640 ---------- virt/kvm/ioapic.c | 5 - virt/kvm/ioapic.h | 1 - virt/kvm/irq_comm.c | 22 - 31 files changed, 13272 deletions(-) delete mode 100644 arch/ia64/include/asm/kvm_host.h delete mode 100644 arch/ia64/include/asm/pvclock-abi.h delete mode 100644 arch/ia64/include/uapi/asm/kvm.h delete mode 100644 arch/ia64/kvm/Kconfig delete mode 100644 arch/ia64/kvm/Makefile delete mode 100644 arch/ia64/kvm/asm-offsets.c delete mode 100644 arch/ia64/kvm/irq.h delete mode 100644 arch/ia64/kvm/kvm-ia64.c delete mode 100644 arch/ia64/kvm/kvm_fw.c delete mode 100644 arch/ia64/kvm/kvm_lib.c delete mode 100644 arch/ia64/kvm/kvm_minstate.h delete mode 100644 arch/ia64/kvm/lapic.h delete mode 100644 arch/ia64/kvm/memcpy.S delete mode 100644 arch/ia64/kvm/memset.S delete mode 100644 arch/ia64/kvm/misc.h delete mode 100644 arch/ia64/kvm/mmio.c delete mode 100644 arch/ia64/kvm/optvfault.S delete mode 100644 arch/ia64/kvm/process.c delete mode 100644 arch/ia64/kvm/trampoline.S delete mode 100644 arch/ia64/kvm/vcpu.c delete mode 100644 arch/ia64/kvm/vcpu.h delete mode 100644 arch/ia64/kvm/vmm.c delete mode 100644 arch/ia64/kvm/vmm_ivt.S delete mode 100644 arch/ia64/kvm/vti.h delete mode 100644 arch/ia64/kvm/vtlb.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3c6427190be2..0b8321afa35a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5352,15 +5352,6 @@ S: Supported F: arch/powerpc/include/asm/kvm* F: arch/powerpc/kvm/ -KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64) -M: Xiantao Zhang -L: kvm-ia64@vger.kernel.org -W: http://kvm.qumranet.com -S: Supported -F: Documentation/ia64/kvm.txt -F: arch/ia64/include/asm/kvm* -F: arch/ia64/kvm/ - KERNEL VIRTUAL MACHINE for s390 (KVM/s390) M: Christian Borntraeger M: Cornelia Huck diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index c84c88bbbbd7..11afe7ab1981 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -21,7 +21,6 @@ config IA64 select HAVE_DYNAMIC_FTRACE if (!ITANIUM) select HAVE_FUNCTION_TRACER select HAVE_DMA_ATTRS - select HAVE_KVM select TTY select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG @@ -640,8 +639,6 @@ source "security/Kconfig" source "crypto/Kconfig" -source "arch/ia64/kvm/Kconfig" - source "lib/Kconfig" config IOMMU_HELPER diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 5441b14994fc..970d0bd99621 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -53,7 +53,6 @@ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ 
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ -core-$(CONFIG_KVM) += arch/ia64/kvm/ drivers-$(CONFIG_PCI) += arch/ia64/pci/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h deleted file mode 100644 index 4729752b7256..000000000000 --- a/arch/ia64/include/asm/kvm_host.h +++ /dev/null @@ -1,609 +0,0 @@ -/* - * kvm_host.h: used for kvm module, and hold ia64-specific sections. - * - * Copyright (C) 2007, Intel Corporation. - * - * Xiantao Zhang - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#ifndef __ASM_KVM_HOST_H -#define __ASM_KVM_HOST_H - -#define KVM_USER_MEM_SLOTS 32 - -#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 -#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS - -/* define exit reasons from vmm to kvm*/ -#define EXIT_REASON_VM_PANIC 0 -#define EXIT_REASON_MMIO_INSTRUCTION 1 -#define EXIT_REASON_PAL_CALL 2 -#define EXIT_REASON_SAL_CALL 3 -#define EXIT_REASON_SWITCH_RR6 4 -#define EXIT_REASON_VM_DESTROY 5 -#define EXIT_REASON_EXTERNAL_INTERRUPT 6 -#define EXIT_REASON_IPI 7 -#define EXIT_REASON_PTC_G 8 -#define EXIT_REASON_DEBUG 20 - -/*Define vmm address space and vm data space.*/ -#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20) -#define KVM_VMM_SHIFT 24 -#define KVM_VMM_BASE 0xD000000000000000 -#define VMM_SIZE (__IA64_UL_CONST(8)<<20) - -/* - * Define vm_buffer, used by PAL Services, base address. - * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M - */ -#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE) -#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20) - -/* - * kvm guest's data area looks as follow: - * - * +----------------------+ ------- KVM_VM_DATA_SIZE - * | vcpu[n]'s data | | ___________________KVM_STK_OFFSET - * | | | / | - * | .......... | | /vcpu's struct&stack | - * | .......... | | /---------------------|---- 0 - * | vcpu[5]'s data | | / vpd | - * | vcpu[4]'s data | |/-----------------------| - * | vcpu[3]'s data | / vtlb | - * | vcpu[2]'s data | /|------------------------| - * | vcpu[1]'s data |/ | vhpt | - * | vcpu[0]'s data |____________________________| - * +----------------------+ | - * | memory dirty log | | - * +----------------------+ | - * | vm's data struct | | - * +----------------------+ | - * | | | - * | | | - * | | | - * | | | - * | | | - * | | | - * | | | - * | vm's p2m table | | - * | | | - * | | | - * | | | | - * vm's data->| | | | - * +----------------------+ ------- 0 - * To support large memory, needs to increase the size of p2m. - * To support more vcpus, needs to ensure it has enough space to - * hold vcpus' data. 
- */ - -#define KVM_VM_DATA_SHIFT 26 -#define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT) -#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE) - -#define KVM_P2M_BASE KVM_VM_DATA_BASE -#define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20) - -#define VHPT_SHIFT 16 -#define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT) -#define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5)) - -#define VTLB_SHIFT 16 -#define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT) -#define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5)) - -#define VPD_SHIFT 16 -#define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT) - -#define VCPU_STRUCT_SHIFT 16 -#define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT) - -/* - * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h - */ -#define KVM_STK_SHIFT 16 -#define KVM_STK_OFFSET (__IA64_UL_CONST(1)<< KVM_STK_SHIFT) - -#define KVM_VM_STRUCT_SHIFT 19 -#define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT) - -#define KVM_MEM_DIRY_LOG_SHIFT 19 -#define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT) - -#ifndef __ASSEMBLY__ - -/*Define the max vcpus and memory for Guests.*/ -#define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\ - KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data) -#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT) - -#define VMM_LOG_LEN 256 - -#include -#include -#include -#include -#include - -#include -#include -#include - -struct kvm_vcpu_data { - char vcpu_vhpt[VHPT_SIZE]; - char vcpu_vtlb[VTLB_SIZE]; - char vcpu_vpd[VPD_SIZE]; - char vcpu_struct[VCPU_STRUCT_SIZE]; -}; - -struct kvm_vm_data { - char kvm_p2m[KVM_P2M_SIZE]; - char kvm_vm_struct[KVM_VM_STRUCT_SIZE]; - char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE]; - struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS]; -}; - -#define VCPU_BASE(n) (KVM_VM_DATA_BASE + \ - offsetof(struct kvm_vm_data, vcpu_data[n])) -#define KVM_VM_BASE (KVM_VM_DATA_BASE + \ - offsetof(struct kvm_vm_data, kvm_vm_struct)) -#define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \ - offsetof(struct kvm_vm_data, kvm_mem_dirty_log) - -#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt)) -#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb)) -#define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd)) -#define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \ - offsetof(struct kvm_vcpu_data, vcpu_struct)) - -/*IO section definitions*/ -#define IOREQ_READ 1 -#define IOREQ_WRITE 0 - -#define STATE_IOREQ_NONE 0 -#define STATE_IOREQ_READY 1 -#define STATE_IOREQ_INPROCESS 2 -#define STATE_IORESP_READY 3 - -/*Guest Physical address layout.*/ -#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */ -#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */ -#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */ -#define GPFN_PIB (3UL << 60) /* PIB base */ -#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */ -#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */ -#define GPFN_GFW (6UL << 60) /* Guest Firmware */ -#define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */ - -#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */ -#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */ -#define INVALID_MFN (~0UL) -#define MEM_G (1UL << 30) -#define MEM_M (1UL << 20) -#define MMIO_START (3 * MEM_G) -#define MMIO_SIZE (512 * MEM_M) -#define VGA_IO_START 0xA0000UL -#define VGA_IO_SIZE 0x20000 -#define LEGACY_IO_START (MMIO_START + MMIO_SIZE) -#define LEGACY_IO_SIZE 
(64 * MEM_M) -#define IO_SAPIC_START 0xfec00000UL -#define IO_SAPIC_SIZE 0x100000 -#define PIB_START 0xfee00000UL -#define PIB_SIZE 0x200000 -#define GFW_START (4 * MEM_G - 16 * MEM_M) -#define GFW_SIZE (16 * MEM_M) - -/*Deliver mode, defined for ioapic.c*/ -#define dest_Fixed IOSAPIC_FIXED -#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY - -#define NMI_VECTOR 2 -#define ExtINT_VECTOR 0 -#define NULL_VECTOR (-1) -#define IA64_SPURIOUS_INT_VECTOR 0x0f - -#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24) - -/* - *Delivery mode - */ -#define SAPIC_DELIV_SHIFT 8 -#define SAPIC_FIXED 0x0 -#define SAPIC_LOWEST_PRIORITY 0x1 -#define SAPIC_PMI 0x2 -#define SAPIC_NMI 0x4 -#define SAPIC_INIT 0x5 -#define SAPIC_EXTINT 0x7 - -/* - * vcpu->requests bit members for arch - */ -#define KVM_REQ_PTC_G 32 -#define KVM_REQ_RESUME 33 - -struct kvm_mmio_req { - uint64_t addr; /* physical address */ - uint64_t size; /* size in bytes */ - uint64_t data; /* data (or paddr of data) */ - uint8_t state:4; - uint8_t dir:1; /* 1=read, 0=write */ -}; - -/*Pal data struct */ -struct kvm_pal_call{ - /*In area*/ - uint64_t gr28; - uint64_t gr29; - uint64_t gr30; - uint64_t gr31; - /*Out area*/ - struct ia64_pal_retval ret; -}; - -/* Sal data structure */ -struct kvm_sal_call{ - /*In area*/ - uint64_t in0; - uint64_t in1; - uint64_t in2; - uint64_t in3; - uint64_t in4; - uint64_t in5; - uint64_t in6; - uint64_t in7; - struct sal_ret_values ret; -}; - -/*Guest change rr6*/ -struct kvm_switch_rr6 { - uint64_t old_rr; - uint64_t new_rr; -}; - -union ia64_ipi_a{ - unsigned long val; - struct { - unsigned long rv : 3; - unsigned long ir : 1; - unsigned long eid : 8; - unsigned long id : 8; - unsigned long ib_base : 44; - }; -}; - -union ia64_ipi_d { - unsigned long val; - struct { - unsigned long vector : 8; - unsigned long dm : 3; - unsigned long ig : 53; - }; -}; - -/*ipi check exit data*/ -struct kvm_ipi_data{ - union ia64_ipi_a addr; - union ia64_ipi_d data; -}; - -/*global purge data*/ -struct kvm_ptc_g { - unsigned long vaddr; - unsigned long rr; - unsigned long ps; - struct kvm_vcpu *vcpu; -}; - -/*Exit control data */ -struct exit_ctl_data{ - uint32_t exit_reason; - uint32_t vm_status; - union { - struct kvm_mmio_req ioreq; - struct kvm_pal_call pal_data; - struct kvm_sal_call sal_data; - struct kvm_switch_rr6 rr_data; - struct kvm_ipi_data ipi_data; - struct kvm_ptc_g ptc_g_data; - } u; -}; - -union pte_flags { - unsigned long val; - struct { - unsigned long p : 1; /*0 */ - unsigned long : 1; /* 1 */ - unsigned long ma : 3; /* 2-4 */ - unsigned long a : 1; /* 5 */ - unsigned long d : 1; /* 6 */ - unsigned long pl : 2; /* 7-8 */ - unsigned long ar : 3; /* 9-11 */ - unsigned long ppn : 38; /* 12-49 */ - unsigned long : 2; /* 50-51 */ - unsigned long ed : 1; /* 52 */ - }; -}; - -union ia64_pta { - unsigned long val; - struct { - unsigned long ve : 1; - unsigned long reserved0 : 1; - unsigned long size : 6; - unsigned long vf : 1; - unsigned long reserved1 : 6; - unsigned long base : 49; - }; -}; - -struct thash_cb { - /* THASH base information */ - struct thash_data *hash; /* hash table pointer */ - union ia64_pta pta; - int num; -}; - -struct kvm_vcpu_stat { - u32 halt_wakeup; -}; - -struct kvm_vcpu_arch { - int launched; - int last_exit; - int last_run_cpu; - int vmm_tr_slot; - int vm_tr_slot; - int sn_rtc_tr_slot; - -#define KVM_MP_STATE_RUNNABLE 0 -#define KVM_MP_STATE_UNINITIALIZED 1 -#define KVM_MP_STATE_INIT_RECEIVED 2 -#define KVM_MP_STATE_HALTED 3 - int mp_state; - -#define MAX_PTC_G_NUM 3 - int ptc_g_count; - 
struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM]; - - /*halt timer to wake up sleepy vcpus*/ - struct hrtimer hlt_timer; - long ht_active; - - struct kvm_lapic *apic; /* kernel irqchip context */ - struct vpd *vpd; - - /* Exit data for vmm_transition*/ - struct exit_ctl_data exit_data; - - cpumask_t cache_coherent_map; - - unsigned long vmm_rr; - unsigned long host_rr6; - unsigned long psbits[8]; - unsigned long cr_iipa; - unsigned long cr_isr; - unsigned long vsa_base; - unsigned long dirty_log_lock_pa; - unsigned long __gp; - /* TR and TC. */ - struct thash_data itrs[NITRS]; - struct thash_data dtrs[NDTRS]; - /* Bit is set if there is a tr/tc for the region. */ - unsigned char itr_regions; - unsigned char dtr_regions; - unsigned char tc_regions; - /* purge all */ - unsigned long ptce_base; - unsigned long ptce_count[2]; - unsigned long ptce_stride[2]; - /* itc/itm */ - unsigned long last_itc; - long itc_offset; - unsigned long itc_check; - unsigned long timer_check; - unsigned int timer_pending; - unsigned int timer_fired; - - unsigned long vrr[8]; - unsigned long ibr[8]; - unsigned long dbr[8]; - unsigned long insvc[4]; /* Interrupt in service. */ - unsigned long xtp; - - unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */ - unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */ - unsigned long metaphysical_saved_rr0; /* from kvm_arch */ - unsigned long metaphysical_saved_rr4; /* from kvm_arch */ - unsigned long fp_psr; /*used for lazy float register */ - unsigned long saved_gp; - /*for phycial emulation */ - int mode_flags; - struct thash_cb vtlb; - struct thash_cb vhpt; - char irq_check; - char irq_new_pending; - - unsigned long opcode; - unsigned long cause; - char log_buf[VMM_LOG_LEN]; - union context host; - union context guest; - - char mmio_data[8]; -}; - -struct kvm_vm_stat { - u64 remote_tlb_flush; -}; - -struct kvm_sal_data { - unsigned long boot_ip; - unsigned long boot_gp; -}; - -struct kvm_arch_memory_slot { -}; - -struct kvm_arch { - spinlock_t dirty_log_lock; - - unsigned long vm_base; - unsigned long metaphysical_rr0; - unsigned long metaphysical_rr4; - unsigned long vmm_init_rr; - - int is_sn2; - - struct kvm_ioapic *vioapic; - struct kvm_vm_stat stat; - struct kvm_sal_data rdv_sal_data; - - struct list_head assigned_dev_head; - struct iommu_domain *iommu_domain; - bool iommu_noncoherent; - - unsigned long irq_sources_bitmap; - unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; -}; - -union cpuid3_t { - u64 value; - struct { - u64 number : 8; - u64 revision : 8; - u64 model : 8; - u64 family : 8; - u64 archrev : 8; - u64 rv : 24; - }; -}; - -struct kvm_pt_regs { - /* The following registers are saved by SAVE_MIN: */ - unsigned long b6; /* scratch */ - unsigned long b7; /* scratch */ - - unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ - unsigned long ar_ssd; /* reserved for future use (scratch) */ - - unsigned long r8; /* scratch (return value register 0) */ - unsigned long r9; /* scratch (return value register 1) */ - unsigned long r10; /* scratch (return value register 2) */ - unsigned long r11; /* scratch (return value register 3) */ - - unsigned long cr_ipsr; /* interrupted task's psr */ - unsigned long cr_iip; /* interrupted task's instruction pointer */ - unsigned long cr_ifs; /* interrupted task's function state */ - - unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ - unsigned long ar_pfs; /* prev function state */ - unsigned long ar_rsc; /* RSE configuration */ - /* The following two are valid only if cr_ipsr.cpl 
> 0: */ - unsigned long ar_rnat; /* RSE NaT */ - unsigned long ar_bspstore; /* RSE bspstore */ - - unsigned long pr; /* 64 predicate registers (1 bit each) */ - unsigned long b0; /* return pointer (bp) */ - unsigned long loadrs; /* size of dirty partition << 16 */ - - unsigned long r1; /* the gp pointer */ - unsigned long r12; /* interrupted task's memory stack pointer */ - unsigned long r13; /* thread pointer */ - - unsigned long ar_fpsr; /* floating point status (preserved) */ - unsigned long r15; /* scratch */ - - /* The remaining registers are NOT saved for system calls. */ - unsigned long r14; /* scratch */ - unsigned long r2; /* scratch */ - unsigned long r3; /* scratch */ - unsigned long r16; /* scratch */ - unsigned long r17; /* scratch */ - unsigned long r18; /* scratch */ - unsigned long r19; /* scratch */ - unsigned long r20; /* scratch */ - unsigned long r21; /* scratch */ - unsigned long r22; /* scratch */ - unsigned long r23; /* scratch */ - unsigned long r24; /* scratch */ - unsigned long r25; /* scratch */ - unsigned long r26; /* scratch */ - unsigned long r27; /* scratch */ - unsigned long r28; /* scratch */ - unsigned long r29; /* scratch */ - unsigned long r30; /* scratch */ - unsigned long r31; /* scratch */ - unsigned long ar_ccv; /* compare/exchange value (scratch) */ - - /* - * Floating point registers that the kernel considers scratch: - */ - struct ia64_fpreg f6; /* scratch */ - struct ia64_fpreg f7; /* scratch */ - struct ia64_fpreg f8; /* scratch */ - struct ia64_fpreg f9; /* scratch */ - struct ia64_fpreg f10; /* scratch */ - struct ia64_fpreg f11; /* scratch */ - - unsigned long r4; /* preserved */ - unsigned long r5; /* preserved */ - unsigned long r6; /* preserved */ - unsigned long r7; /* preserved */ - unsigned long eml_unat; /* used for emulating instruction */ - unsigned long pad0; /* alignment pad */ -}; - -static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v) -{ - return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1; -} - -typedef int kvm_vmm_entry(void); -typedef void kvm_tramp_entry(union context *host, union context *guest); - -struct kvm_vmm_info{ - struct module *module; - kvm_vmm_entry *vmm_entry; - kvm_tramp_entry *tramp_entry; - unsigned long vmm_ivt; - unsigned long patch_mov_ar; - unsigned long patch_mov_ar_sn2; -}; - -int kvm_highest_pending_irq(struct kvm_vcpu *vcpu); -int kvm_emulate_halt(struct kvm_vcpu *vcpu); -int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); -void kvm_sal_emul(struct kvm_vcpu *vcpu); - -#define __KVM_HAVE_ARCH_VM_ALLOC 1 -struct kvm *kvm_arch_alloc_vm(void); -void kvm_arch_free_vm(struct kvm *kvm); - -static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {} -static inline void kvm_arch_free_memslot(struct kvm *kvm, - struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} -static inline void kvm_arch_memslots_updated(struct kvm *kvm) {} -static inline void kvm_arch_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, - enum kvm_mr_change change) {} -static inline void kvm_arch_hardware_unsetup(void) {} - -#endif /* __ASSEMBLY__*/ - -#endif diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h deleted file mode 100644 index 42b233bedeb5..000000000000 --- a/arch/ia64/include/asm/pvclock-abi.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * same structure 
to x86's - * Hopefully asm-x86/pvclock-abi.h would be moved to somewhere more generic. - * For now, define same duplicated definitions. - */ - -#ifndef _ASM_IA64__PVCLOCK_ABI_H -#define _ASM_IA64__PVCLOCK_ABI_H -#ifndef __ASSEMBLY__ - -/* - * These structs MUST NOT be changed. - * They are the ABI between hypervisor and guest OS. - * KVM is using this. - * - * pvclock_vcpu_time_info holds the system time and the tsc timestamp - * of the last update. So the guest can use the tsc delta to get a - * more precise system time. There is one per virtual cpu. - * - * pvclock_wall_clock references the point in time when the system - * time was zero (usually boot time), thus the guest calculates the - * current wall clock by adding the system time. - * - * Protocol for the "version" fields is: hypervisor raises it (making - * it uneven) before it starts updating the fields and raises it again - * (making it even) when it is done. Thus the guest can make sure the - * time values it got are consistent by checking the version before - * and after reading them. - */ - -struct pvclock_vcpu_time_info { - u32 version; - u32 pad0; - u64 tsc_timestamp; - u64 system_time; - u32 tsc_to_system_mul; - s8 tsc_shift; - u8 pad[3]; -} __attribute__((__packed__)); /* 32 bytes */ - -struct pvclock_wall_clock { - u32 version; - u32 sec; - u32 nsec; -} __attribute__((__packed__)); - -#endif /* __ASSEMBLY__ */ -#endif /* _ASM_IA64__PVCLOCK_ABI_H */ diff --git a/arch/ia64/include/uapi/asm/kvm.h b/arch/ia64/include/uapi/asm/kvm.h deleted file mode 100644 index 99503c284400..000000000000 --- a/arch/ia64/include/uapi/asm/kvm.h +++ /dev/null @@ -1,268 +0,0 @@ -#ifndef __ASM_IA64_KVM_H -#define __ASM_IA64_KVM_H - -/* - * kvm structure definitions for ia64 - * - * Copyright (C) 2007 Xiantao Zhang - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#include -#include - -/* Select x86 specific features in */ -#define __KVM_HAVE_IOAPIC -#define __KVM_HAVE_IRQ_LINE - -/* Architectural interrupt line count. 
*/ -#define KVM_NR_INTERRUPTS 256 - -#define KVM_IOAPIC_NUM_PINS 48 - -struct kvm_ioapic_state { - __u64 base_address; - __u32 ioregsel; - __u32 id; - __u32 irr; - __u32 pad; - union { - __u64 bits; - struct { - __u8 vector; - __u8 delivery_mode:3; - __u8 dest_mode:1; - __u8 delivery_status:1; - __u8 polarity:1; - __u8 remote_irr:1; - __u8 trig_mode:1; - __u8 mask:1; - __u8 reserve:7; - __u8 reserved[4]; - __u8 dest_id; - } fields; - } redirtbl[KVM_IOAPIC_NUM_PINS]; -}; - -#define KVM_IRQCHIP_PIC_MASTER 0 -#define KVM_IRQCHIP_PIC_SLAVE 1 -#define KVM_IRQCHIP_IOAPIC 2 -#define KVM_NR_IRQCHIPS 3 - -#define KVM_CONTEXT_SIZE 8*1024 - -struct kvm_fpreg { - union { - unsigned long bits[2]; - long double __dummy; /* force 16-byte alignment */ - } u; -}; - -union context { - /* 8K size */ - char dummy[KVM_CONTEXT_SIZE]; - struct { - unsigned long psr; - unsigned long pr; - unsigned long caller_unat; - unsigned long pad; - unsigned long gr[32]; - unsigned long ar[128]; - unsigned long br[8]; - unsigned long cr[128]; - unsigned long rr[8]; - unsigned long ibr[8]; - unsigned long dbr[8]; - unsigned long pkr[8]; - struct kvm_fpreg fr[128]; - }; -}; - -struct thash_data { - union { - struct { - unsigned long p : 1; /* 0 */ - unsigned long rv1 : 1; /* 1 */ - unsigned long ma : 3; /* 2-4 */ - unsigned long a : 1; /* 5 */ - unsigned long d : 1; /* 6 */ - unsigned long pl : 2; /* 7-8 */ - unsigned long ar : 3; /* 9-11 */ - unsigned long ppn : 38; /* 12-49 */ - unsigned long rv2 : 2; /* 50-51 */ - unsigned long ed : 1; /* 52 */ - unsigned long ig1 : 11; /* 53-63 */ - }; - struct { - unsigned long __rv1 : 53; /* 0-52 */ - unsigned long contiguous : 1; /*53 */ - unsigned long tc : 1; /* 54 TR or TC */ - unsigned long cl : 1; - /* 55 I side or D side cache line */ - unsigned long len : 4; /* 56-59 */ - unsigned long io : 1; /* 60 entry is for io or not */ - unsigned long nomap : 1; - /* 61 entry cann't be inserted into machine TLB.*/ - unsigned long checked : 1; - /* 62 for VTLB/VHPT sanity check */ - unsigned long invalid : 1; - /* 63 invalid entry */ - }; - unsigned long page_flags; - }; /* same for VHPT and TLB */ - - union { - struct { - unsigned long rv3 : 2; - unsigned long ps : 6; - unsigned long key : 24; - unsigned long rv4 : 32; - }; - unsigned long itir; - }; - union { - struct { - unsigned long ig2 : 12; - unsigned long vpn : 49; - unsigned long vrn : 3; - }; - unsigned long ifa; - unsigned long vadr; - struct { - unsigned long tag : 63; - unsigned long ti : 1; - }; - unsigned long etag; - }; - union { - struct thash_data *next; - unsigned long rid; - unsigned long gpaddr; - }; -}; - -#define NITRS 8 -#define NDTRS 8 - -struct saved_vpd { - unsigned long vhpi; - unsigned long vgr[16]; - unsigned long vbgr[16]; - unsigned long vnat; - unsigned long vbnat; - unsigned long vcpuid[5]; - unsigned long vpsr; - unsigned long vpr; - union { - unsigned long vcr[128]; - struct { - unsigned long dcr; - unsigned long itm; - unsigned long iva; - unsigned long rsv1[5]; - unsigned long pta; - unsigned long rsv2[7]; - unsigned long ipsr; - unsigned long isr; - unsigned long rsv3; - unsigned long iip; - unsigned long ifa; - unsigned long itir; - unsigned long iipa; - unsigned long ifs; - unsigned long iim; - unsigned long iha; - unsigned long rsv4[38]; - unsigned long lid; - unsigned long ivr; - unsigned long tpr; - unsigned long eoi; - unsigned long irr[4]; - unsigned long itv; - unsigned long pmv; - unsigned long cmcv; - unsigned long rsv5[5]; - unsigned long lrr0; - unsigned long lrr1; - unsigned long rsv6[46]; 
- }; - }; -}; - -struct kvm_regs { - struct saved_vpd vpd; - /*Arch-regs*/ - int mp_state; - unsigned long vmm_rr; - /* TR and TC. */ - struct thash_data itrs[NITRS]; - struct thash_data dtrs[NDTRS]; - /* Bit is set if there is a tr/tc for the region. */ - unsigned char itr_regions; - unsigned char dtr_regions; - unsigned char tc_regions; - - char irq_check; - unsigned long saved_itc; - unsigned long itc_check; - unsigned long timer_check; - unsigned long timer_pending; - unsigned long last_itc; - - unsigned long vrr[8]; - unsigned long ibr[8]; - unsigned long dbr[8]; - unsigned long insvc[4]; /* Interrupt in service. */ - unsigned long xtp; - - unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */ - unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */ - unsigned long metaphysical_saved_rr0; /* from kvm_arch */ - unsigned long metaphysical_saved_rr4; /* from kvm_arch */ - unsigned long fp_psr; /*used for lazy float register */ - unsigned long saved_gp; - /*for phycial emulation */ - - union context saved_guest; - - unsigned long reserved[64]; /* for future use */ -}; - -struct kvm_sregs { -}; - -struct kvm_fpu { -}; - -#define KVM_IA64_VCPU_STACK_SHIFT 16 -#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT) - -struct kvm_ia64_vcpu_stack { - unsigned char stack[KVM_IA64_VCPU_STACK_SIZE]; -}; - -struct kvm_debug_exit_arch { -}; - -/* for KVM_SET_GUEST_DEBUG */ -struct kvm_guest_debug_arch { -}; - -/* definition of registers in kvm_run */ -struct kvm_sync_regs { -}; - -#endif diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig deleted file mode 100644 index 3d50ea955c4c..000000000000 --- a/arch/ia64/kvm/Kconfig +++ /dev/null @@ -1,66 +0,0 @@ -# -# KVM configuration -# - -source "virt/kvm/Kconfig" - -menuconfig VIRTUALIZATION - bool "Virtualization" - depends on HAVE_KVM || IA64 - default y - ---help--- - Say Y here to get to see options for using your Linux host to run other - operating systems inside virtual machines (guests). - This option alone does not add any kernel code. - - If you say N, all options in this submenu will be skipped and disabled. - -if VIRTUALIZATION - -config KVM - tristate "Kernel-based Virtual Machine (KVM) support" - depends on BROKEN - depends on HAVE_KVM && MODULES - depends on BROKEN - select PREEMPT_NOTIFIERS - select ANON_INODES - select HAVE_KVM_IRQCHIP - select HAVE_KVM_IRQFD - select HAVE_KVM_IRQ_ROUTING - select KVM_APIC_ARCHITECTURE - select KVM_MMIO - ---help--- - Support hosting fully virtualized guest machines using hardware - virtualization extensions. You will need a fairly recent - processor equipped with virtualization extensions. You will also - need to select one or more of the processor modules below. - - This module provides access to the hardware capabilities through - a character device node named /dev/kvm. - - To compile this as a module, choose M here: the module - will be called kvm. - - If unsure, say N. - -config KVM_INTEL - tristate "KVM for Intel Itanium 2 processors support" - depends on KVM && m - ---help--- - Provides support for KVM on Itanium 2 processors equipped with the VT - extensions. - -config KVM_DEVICE_ASSIGNMENT - bool "KVM legacy PCI device assignment support" - depends on KVM && PCI && IOMMU_API - default y - ---help--- - Provide support for legacy PCI device assignment through KVM. The - kernel now also supports a full featured userspace device driver - framework through VFIO, which supersedes much of this support. - - If unsure, say Y. 
- -source drivers/vhost/Kconfig - -endif # VIRTUALIZATION diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile deleted file mode 100644 index 18e45ec49bbf..000000000000 --- a/arch/ia64/kvm/Makefile +++ /dev/null @@ -1,67 +0,0 @@ -#This Make file is to generate asm-offsets.h and build source. -# - -#Generate asm-offsets.h for vmm module build -offsets-file := asm-offsets.h - -always := $(offsets-file) -targets := $(offsets-file) -targets += arch/ia64/kvm/asm-offsets.s - -# Default sed regexp - multiline due to syntax constraints -define sed-y - "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}" -endef - -quiet_cmd_offsets = GEN $@ -define cmd_offsets - (set -e; \ - echo "#ifndef __ASM_KVM_OFFSETS_H__"; \ - echo "#define __ASM_KVM_OFFSETS_H__"; \ - echo "/*"; \ - echo " * DO NOT MODIFY."; \ - echo " *"; \ - echo " * This file was generated by Makefile"; \ - echo " *"; \ - echo " */"; \ - echo ""; \ - sed -ne $(sed-y) $<; \ - echo ""; \ - echo "#endif" ) > $@ -endef - -# We use internal rules to avoid the "is up to date" message from make -arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \ - $(wildcard $(srctree)/arch/ia64/include/asm/*.h)\ - $(wildcard $(srctree)/include/linux/*.h) - $(call if_changed_dep,cc_s_c) - -$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s - $(call cmd,offsets) - -FORCE : $(obj)/$(offsets-file) - -# -# Makefile for Kernel-based Virtual Machine module -# - -ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/ -asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/ -KVM := ../../../virt/kvm - -common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \ - $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o - -ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y) -common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o -endif - -kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o -obj-$(CONFIG_KVM) += kvm.o - -CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127 -kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \ - vtlb.o process.o kvm_lib.o -#Add link memcpy and memset to avoid possible structure assignment error -kvm-intel-objs += memcpy.o memset.o -obj-$(CONFIG_KVM_INTEL) += kvm-intel.o diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c deleted file mode 100644 index 9324c875caf5..000000000000 --- a/arch/ia64/kvm/asm-offsets.c +++ /dev/null @@ -1,241 +0,0 @@ -/* - * asm-offsets.c Generate definitions needed by assembly language modules. - * This code generates raw asm output which is post-processed - * to extract and format the required data. - * - * Anthony Xu - * Xiantao Zhang - * Copyright (c) 2007 Intel Corporation KVM support. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
- * - */ - -#include -#include - -#include "vcpu.h" - -void foo(void) -{ - DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu)); - DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs)); - - BLANK(); - - DEFINE(VMM_VCPU_META_RR0_OFFSET, - offsetof(struct kvm_vcpu, arch.metaphysical_rr0)); - DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET, - offsetof(struct kvm_vcpu, - arch.metaphysical_saved_rr0)); - DEFINE(VMM_VCPU_VRR0_OFFSET, - offsetof(struct kvm_vcpu, arch.vrr[0])); - DEFINE(VMM_VPD_IRR0_OFFSET, - offsetof(struct vpd, irr[0])); - DEFINE(VMM_VCPU_ITC_CHECK_OFFSET, - offsetof(struct kvm_vcpu, arch.itc_check)); - DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET, - offsetof(struct kvm_vcpu, arch.irq_check)); - DEFINE(VMM_VPD_VHPI_OFFSET, - offsetof(struct vpd, vhpi)); - DEFINE(VMM_VCPU_VSA_BASE_OFFSET, - offsetof(struct kvm_vcpu, arch.vsa_base)); - DEFINE(VMM_VCPU_VPD_OFFSET, - offsetof(struct kvm_vcpu, arch.vpd)); - DEFINE(VMM_VCPU_IRQ_CHECK, - offsetof(struct kvm_vcpu, arch.irq_check)); - DEFINE(VMM_VCPU_TIMER_PENDING, - offsetof(struct kvm_vcpu, arch.timer_pending)); - DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET, - offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0)); - DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET, - offsetof(struct kvm_vcpu, arch.mode_flags)); - DEFINE(VMM_VCPU_ITC_OFS_OFFSET, - offsetof(struct kvm_vcpu, arch.itc_offset)); - DEFINE(VMM_VCPU_LAST_ITC_OFFSET, - offsetof(struct kvm_vcpu, arch.last_itc)); - DEFINE(VMM_VCPU_SAVED_GP_OFFSET, - offsetof(struct kvm_vcpu, arch.saved_gp)); - - BLANK(); - - DEFINE(VMM_PT_REGS_B6_OFFSET, - offsetof(struct kvm_pt_regs, b6)); - DEFINE(VMM_PT_REGS_B7_OFFSET, - offsetof(struct kvm_pt_regs, b7)); - DEFINE(VMM_PT_REGS_AR_CSD_OFFSET, - offsetof(struct kvm_pt_regs, ar_csd)); - DEFINE(VMM_PT_REGS_AR_SSD_OFFSET, - offsetof(struct kvm_pt_regs, ar_ssd)); - DEFINE(VMM_PT_REGS_R8_OFFSET, - offsetof(struct kvm_pt_regs, r8)); - DEFINE(VMM_PT_REGS_R9_OFFSET, - offsetof(struct kvm_pt_regs, r9)); - DEFINE(VMM_PT_REGS_R10_OFFSET, - offsetof(struct kvm_pt_regs, r10)); - DEFINE(VMM_PT_REGS_R11_OFFSET, - offsetof(struct kvm_pt_regs, r11)); - DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET, - offsetof(struct kvm_pt_regs, cr_ipsr)); - DEFINE(VMM_PT_REGS_CR_IIP_OFFSET, - offsetof(struct kvm_pt_regs, cr_iip)); - DEFINE(VMM_PT_REGS_CR_IFS_OFFSET, - offsetof(struct kvm_pt_regs, cr_ifs)); - DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET, - offsetof(struct kvm_pt_regs, ar_unat)); - DEFINE(VMM_PT_REGS_AR_PFS_OFFSET, - offsetof(struct kvm_pt_regs, ar_pfs)); - DEFINE(VMM_PT_REGS_AR_RSC_OFFSET, - offsetof(struct kvm_pt_regs, ar_rsc)); - DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET, - offsetof(struct kvm_pt_regs, ar_rnat)); - - DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET, - offsetof(struct kvm_pt_regs, ar_bspstore)); - DEFINE(VMM_PT_REGS_PR_OFFSET, - offsetof(struct kvm_pt_regs, pr)); - DEFINE(VMM_PT_REGS_B0_OFFSET, - offsetof(struct kvm_pt_regs, b0)); - DEFINE(VMM_PT_REGS_LOADRS_OFFSET, - offsetof(struct kvm_pt_regs, loadrs)); - DEFINE(VMM_PT_REGS_R1_OFFSET, - offsetof(struct kvm_pt_regs, r1)); - DEFINE(VMM_PT_REGS_R12_OFFSET, - offsetof(struct kvm_pt_regs, r12)); - DEFINE(VMM_PT_REGS_R13_OFFSET, - offsetof(struct kvm_pt_regs, r13)); - DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET, - offsetof(struct kvm_pt_regs, ar_fpsr)); - DEFINE(VMM_PT_REGS_R15_OFFSET, - offsetof(struct kvm_pt_regs, r15)); - DEFINE(VMM_PT_REGS_R14_OFFSET, - offsetof(struct kvm_pt_regs, r14)); - DEFINE(VMM_PT_REGS_R2_OFFSET, - offsetof(struct kvm_pt_regs, r2)); - DEFINE(VMM_PT_REGS_R3_OFFSET, - offsetof(struct kvm_pt_regs, r3)); - DEFINE(VMM_PT_REGS_R16_OFFSET, - offsetof(struct kvm_pt_regs, 
r16)); - DEFINE(VMM_PT_REGS_R17_OFFSET, - offsetof(struct kvm_pt_regs, r17)); - DEFINE(VMM_PT_REGS_R18_OFFSET, - offsetof(struct kvm_pt_regs, r18)); - DEFINE(VMM_PT_REGS_R19_OFFSET, - offsetof(struct kvm_pt_regs, r19)); - DEFINE(VMM_PT_REGS_R20_OFFSET, - offsetof(struct kvm_pt_regs, r20)); - DEFINE(VMM_PT_REGS_R21_OFFSET, - offsetof(struct kvm_pt_regs, r21)); - DEFINE(VMM_PT_REGS_R22_OFFSET, - offsetof(struct kvm_pt_regs, r22)); - DEFINE(VMM_PT_REGS_R23_OFFSET, - offsetof(struct kvm_pt_regs, r23)); - DEFINE(VMM_PT_REGS_R24_OFFSET, - offsetof(struct kvm_pt_regs, r24)); - DEFINE(VMM_PT_REGS_R25_OFFSET, - offsetof(struct kvm_pt_regs, r25)); - DEFINE(VMM_PT_REGS_R26_OFFSET, - offsetof(struct kvm_pt_regs, r26)); - DEFINE(VMM_PT_REGS_R27_OFFSET, - offsetof(struct kvm_pt_regs, r27)); - DEFINE(VMM_PT_REGS_R28_OFFSET, - offsetof(struct kvm_pt_regs, r28)); - DEFINE(VMM_PT_REGS_R29_OFFSET, - offsetof(struct kvm_pt_regs, r29)); - DEFINE(VMM_PT_REGS_R30_OFFSET, - offsetof(struct kvm_pt_regs, r30)); - DEFINE(VMM_PT_REGS_R31_OFFSET, - offsetof(struct kvm_pt_regs, r31)); - DEFINE(VMM_PT_REGS_AR_CCV_OFFSET, - offsetof(struct kvm_pt_regs, ar_ccv)); - DEFINE(VMM_PT_REGS_F6_OFFSET, - offsetof(struct kvm_pt_regs, f6)); - DEFINE(VMM_PT_REGS_F7_OFFSET, - offsetof(struct kvm_pt_regs, f7)); - DEFINE(VMM_PT_REGS_F8_OFFSET, - offsetof(struct kvm_pt_regs, f8)); - DEFINE(VMM_PT_REGS_F9_OFFSET, - offsetof(struct kvm_pt_regs, f9)); - DEFINE(VMM_PT_REGS_F10_OFFSET, - offsetof(struct kvm_pt_regs, f10)); - DEFINE(VMM_PT_REGS_F11_OFFSET, - offsetof(struct kvm_pt_regs, f11)); - DEFINE(VMM_PT_REGS_R4_OFFSET, - offsetof(struct kvm_pt_regs, r4)); - DEFINE(VMM_PT_REGS_R5_OFFSET, - offsetof(struct kvm_pt_regs, r5)); - DEFINE(VMM_PT_REGS_R6_OFFSET, - offsetof(struct kvm_pt_regs, r6)); - DEFINE(VMM_PT_REGS_R7_OFFSET, - offsetof(struct kvm_pt_regs, r7)); - DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET, - offsetof(struct kvm_pt_regs, eml_unat)); - DEFINE(VMM_VCPU_IIPA_OFFSET, - offsetof(struct kvm_vcpu, arch.cr_iipa)); - DEFINE(VMM_VCPU_OPCODE_OFFSET, - offsetof(struct kvm_vcpu, arch.opcode)); - DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause)); - DEFINE(VMM_VCPU_ISR_OFFSET, - offsetof(struct kvm_vcpu, arch.cr_isr)); - DEFINE(VMM_PT_REGS_R16_SLOT, - (((offsetof(struct kvm_pt_regs, r16) - - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f)); - DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET, - offsetof(struct kvm_vcpu, arch.mode_flags)); - DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp)); - BLANK(); - - DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd)); - DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs)); - DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET, - offsetof(struct kvm_vcpu, arch.insvc[0])); - DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta)); - DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr)); - - DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4])); - DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5])); - DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12])); - DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13])); - DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0])); - DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1])); - DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0])); - DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1])); - DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2])); - DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0])); - DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16])); - DEFINE(VMM_CTX_BSPSTORE_OFFSET, 
offsetof(union context, ar[18])); - DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19])); - DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21])); - DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24])); - DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27])); - DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28])); - DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29])); - DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30])); - DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36])); - DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40])); - DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64])); - DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65])); - DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0])); - DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2])); - DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8])); - DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0])); - DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0])); - DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2])); - DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3])); - DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32])); - DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33])); - DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0])); - DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr)); - BLANK(); -} diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h deleted file mode 100644 index c0785a728271..000000000000 --- a/arch/ia64/kvm/irq.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * irq.h: In-kernel interrupt controller related definitions - * Copyright (c) 2008, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Authors: - * Xiantao Zhang - * - */ - -#ifndef __IRQ_H -#define __IRQ_H - -#include "lapic.h" - -static inline int irqchip_in_kernel(struct kvm *kvm) -{ - return 1; -} - -#endif diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c deleted file mode 100644 index ec6b9acb6bea..000000000000 --- a/arch/ia64/kvm/kvm-ia64.c +++ /dev/null @@ -1,1942 +0,0 @@ -/* - * kvm_ia64.c: Basic KVM support On Itanium series processors - * - * - * Copyright (C) 2007, Intel Corporation. - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
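/*
 * The asm-offsets.c file deleted above, together with the sed rule in the
 * deleted Makefile, is the kernel's usual trick for exporting C structure
 * offsets to assembly (here, the VMM entry and trampoline code): the file is
 * compiled to assembly, each DEFINE() leaves a "->NAME value comment" marker
 * in the .s output, and sed rewrites those markers into #defines in
 * asm-offsets.h.  A minimal standalone sketch of the mechanism follows; the
 * struct and symbol names are hypothetical, and DEFINE() is quoted roughly as
 * it appears in include/linux/kbuild.h.
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct foo {				/* illustrative only */
	long a;
	long bar;
};

void foo_offsets(void)
{
	/* Compile with "cc -S sketch.c"; the .s output then contains a line
	 * like "->FOO_BAR_OFFSET $8 offsetof(struct foo, bar)".  The sed
	 * expression in the Makefile strips the "$"/"#" immediate prefix and
	 * emits "#define FOO_BAR_OFFSET 8".
	 */
	DEFINE(FOO_BAR_OFFSET, offsetof(struct foo, bar));
}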
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "misc.h" -#include "vti.h" -#include "iodev.h" -#include "ioapic.h" -#include "lapic.h" -#include "irq.h" - -static unsigned long kvm_vmm_base; -static unsigned long kvm_vsa_base; -static unsigned long kvm_vm_buffer; -static unsigned long kvm_vm_buffer_size; -unsigned long kvm_vmm_gp; - -static long vp_env_info; - -static struct kvm_vmm_info *kvm_vmm_info; - -static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); - -struct kvm_stats_debugfs_item debugfs_entries[] = { - { NULL } -}; - -static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu) -{ -#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) - if (vcpu->kvm->arch.is_sn2) - return rtc_time(); - else -#endif - return ia64_getreg(_IA64_REG_AR_ITC); -} - -static void kvm_flush_icache(unsigned long start, unsigned long len) -{ - int l; - - for (l = 0; l < (len + 32); l += 32) - ia64_fc((void *)(start + l)); - - ia64_sync_i(); - ia64_srlz_i(); -} - -static void kvm_flush_tlb_all(void) -{ - unsigned long i, j, count0, count1, stride0, stride1, addr; - long flags; - - addr = local_cpu_data->ptce_base; - count0 = local_cpu_data->ptce_count[0]; - count1 = local_cpu_data->ptce_count[1]; - stride0 = local_cpu_data->ptce_stride[0]; - stride1 = local_cpu_data->ptce_stride[1]; - - local_irq_save(flags); - for (i = 0; i < count0; ++i) { - for (j = 0; j < count1; ++j) { - ia64_ptce(addr); - addr += stride1; - } - addr += stride0; - } - local_irq_restore(flags); - ia64_srlz_i(); /* srlz.i implies srlz.d */ -} - -long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) -{ - struct ia64_pal_retval iprv; - - PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, - (u64)opt_handler); - - return iprv.status; -} - -static DEFINE_SPINLOCK(vp_lock); - -int kvm_arch_hardware_enable(void) -{ - long status; - long tmp_base; - unsigned long pte; - unsigned long saved_psr; - int slot; - - pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); - local_irq_save(saved_psr); - slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); - local_irq_restore(saved_psr); - if (slot < 0) - return -EINVAL; - - spin_lock(&vp_lock); - status = ia64_pal_vp_init_env(kvm_vsa_base ? 
- VP_INIT_ENV : VP_INIT_ENV_INITALIZE, - __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); - if (status != 0) { - spin_unlock(&vp_lock); - printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); - return -EINVAL; - } - - if (!kvm_vsa_base) { - kvm_vsa_base = tmp_base; - printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); - } - spin_unlock(&vp_lock); - ia64_ptr_entry(0x3, slot); - - return 0; -} - -void kvm_arch_hardware_disable(void) -{ - - long status; - int slot; - unsigned long pte; - unsigned long saved_psr; - unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); - - pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), - PAGE_KERNEL)); - - local_irq_save(saved_psr); - slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); - local_irq_restore(saved_psr); - if (slot < 0) - return; - - status = ia64_pal_vp_exit_env(host_iva); - if (status) - printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", - status); - ia64_ptr_entry(0x3, slot); -} - -void kvm_arch_check_processor_compat(void *rtn) -{ - *(int *)rtn = 0; -} - -int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) -{ - - int r; - - switch (ext) { - case KVM_CAP_IRQCHIP: - case KVM_CAP_MP_STATE: - case KVM_CAP_IRQ_INJECT_STATUS: - case KVM_CAP_IOAPIC_POLARITY_IGNORED: - r = 1; - break; - case KVM_CAP_COALESCED_MMIO: - r = KVM_COALESCED_MMIO_PAGE_OFFSET; - break; -#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT - case KVM_CAP_IOMMU: - r = iommu_present(&pci_bus_type); - break; -#endif - default: - r = 0; - } - return r; - -} - -static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = 1; - return 0; -} - -static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct kvm_mmio_req *p; - struct kvm_io_device *mmio_dev; - int r; - - p = kvm_get_vcpu_ioreq(vcpu); - - if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) - goto mmio; - vcpu->mmio_needed = 1; - vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr; - vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size; - vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; - - if (vcpu->mmio_is_write) - memcpy(vcpu->arch.mmio_data, &p->data, p->size); - memcpy(kvm_run->mmio.data, &p->data, p->size); - kvm_run->exit_reason = KVM_EXIT_MMIO; - return 0; -mmio: - if (p->dir) - r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr, - p->size, &p->data); - else - r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr, - p->size, &p->data); - if (r) - printk(KERN_ERR"kvm: No iodevice found! 
addr:%lx\n", p->addr); - p->state = STATE_IORESP_READY; - - return 1; -} - -static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - - if (p->exit_reason == EXIT_REASON_PAL_CALL) - return kvm_pal_emul(vcpu, kvm_run); - else { - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = 2; - return 0; - } -} - -static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - kvm_sal_emul(vcpu); - return 1; - } else { - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = 3; - return 0; - } - -} - -static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector) -{ - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - - if (!test_and_set_bit(vector, &vpd->irr[0])) { - vcpu->arch.irq_new_pending = 1; - kvm_vcpu_kick(vcpu); - return 1; - } - return 0; -} - -/* - * offset: address offset to IPI space. - * value: deliver value. - */ -static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, - uint64_t vector) -{ - switch (dm) { - case SAPIC_FIXED: - break; - case SAPIC_NMI: - vector = 2; - break; - case SAPIC_EXTINT: - vector = 0; - break; - case SAPIC_INIT: - case SAPIC_PMI: - default: - printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); - return; - } - __apic_accept_irq(vcpu, vector); -} - -static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, - unsigned long eid) -{ - union ia64_lid lid; - int i; - struct kvm_vcpu *vcpu; - - kvm_for_each_vcpu(i, vcpu, kvm) { - lid.val = VCPU_LID(vcpu); - if (lid.id == id && lid.eid == eid) - return vcpu; - } - - return NULL; -} - -static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct exit_ctl_data *p = kvm_get_exit_data(vcpu); - struct kvm_vcpu *target_vcpu; - struct kvm_pt_regs *regs; - union ia64_ipi_a addr = p->u.ipi_data.addr; - union ia64_ipi_d data = p->u.ipi_data.data; - - target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); - if (!target_vcpu) - return handle_vm_error(vcpu, kvm_run); - - if (!target_vcpu->arch.launched) { - regs = vcpu_regs(target_vcpu); - - regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; - regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; - - target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - if (waitqueue_active(&target_vcpu->wq)) - wake_up_interruptible(&target_vcpu->wq); - } else { - vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); - if (target_vcpu != vcpu) - kvm_vcpu_kick(target_vcpu); - } - - return 1; -} - -struct call_data { - struct kvm_ptc_g ptc_g_data; - struct kvm_vcpu *vcpu; -}; - -static void vcpu_global_purge(void *info) -{ - struct call_data *p = (struct call_data *)info; - struct kvm_vcpu *vcpu = p->vcpu; - - if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) - return; - - set_bit(KVM_REQ_PTC_G, &vcpu->requests); - if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { - vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = - p->ptc_g_data; - } else { - clear_bit(KVM_REQ_PTC_G, &vcpu->requests); - vcpu->arch.ptc_g_count = 0; - set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); - } -} - -static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - struct exit_ctl_data *p = kvm_get_exit_data(vcpu); - struct kvm *kvm = vcpu->kvm; - struct call_data call_data; - int i; - struct kvm_vcpu *vcpui; - - call_data.ptc_g_data = p->u.ptc_g_data; - - kvm_for_each_vcpu(i, vcpui, kvm) { - if 
(vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || - vcpu == vcpui) - continue; - - if (waitqueue_active(&vcpui->wq)) - wake_up_interruptible(&vcpui->wq); - - if (vcpui->cpu != -1) { - call_data.vcpu = vcpui; - smp_call_function_single(vcpui->cpu, - vcpu_global_purge, &call_data, 1); - } else - printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); - - } - return 1; -} - -static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - return 1; -} - -static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu) -{ - unsigned long pte, rtc_phys_addr, map_addr; - int slot; - - map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT); - rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC; - pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC)); - slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT); - vcpu->arch.sn_rtc_tr_slot = slot; - if (slot < 0) { - printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n"); - slot = 0; - } - return slot; -} - -int kvm_emulate_halt(struct kvm_vcpu *vcpu) -{ - - ktime_t kt; - long itc_diff; - unsigned long vcpu_now_itc; - unsigned long expires; - struct hrtimer *p_ht = &vcpu->arch.hlt_timer; - unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - - if (irqchip_in_kernel(vcpu->kvm)) { - - vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset; - - if (time_after(vcpu_now_itc, vpd->itm)) { - vcpu->arch.timer_check = 1; - return 1; - } - itc_diff = vpd->itm - vcpu_now_itc; - if (itc_diff < 0) - itc_diff = -itc_diff; - - expires = div64_u64(itc_diff, cyc_per_usec); - kt = ktime_set(0, 1000 * expires); - - vcpu->arch.ht_active = 1; - hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); - - vcpu->arch.mp_state = KVM_MP_STATE_HALTED; - kvm_vcpu_block(vcpu); - hrtimer_cancel(p_ht); - vcpu->arch.ht_active = 0; - - if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) || - kvm_cpu_has_pending_timer(vcpu)) - if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - - if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) - return -EINTR; - return 1; - } else { - printk(KERN_ERR"kvm: Unsupported userspace halt!"); - return 0; - } -} - -static int handle_vm_shutdown(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; - return 0; -} - -static int handle_external_interrupt(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - return 1; -} - -static int handle_vcpu_debug(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - printk("VMM: %s", vcpu->arch.log_buf); - return 1; -} - -static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) = { - [EXIT_REASON_VM_PANIC] = handle_vm_error, - [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, - [EXIT_REASON_PAL_CALL] = handle_pal_call, - [EXIT_REASON_SAL_CALL] = handle_sal_call, - [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, - [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, - [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, - [EXIT_REASON_IPI] = handle_ipi, - [EXIT_REASON_PTC_G] = handle_global_purge, - [EXIT_REASON_DEBUG] = handle_vcpu_debug, - -}; - -static const int kvm_vti_max_exit_handlers = - sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); - -static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p_exit_data; - - p_exit_data = kvm_get_exit_data(vcpu); - return p_exit_data->exit_reason; -} - -/* - * The guest has exited. See if we can fix it or if we need userspace - * assistance. 
- */ -static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) -{ - u32 exit_reason = kvm_get_exit_reason(vcpu); - vcpu->arch.last_exit = exit_reason; - - if (exit_reason < kvm_vti_max_exit_handlers - && kvm_vti_exit_handlers[exit_reason]) - return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); - else { - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = exit_reason; - } - return 0; -} - -static inline void vti_set_rr6(unsigned long rr6) -{ - ia64_set_rr(RR6, rr6); - ia64_srlz_i(); -} - -static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) -{ - unsigned long pte; - struct kvm *kvm = vcpu->kvm; - int r; - - /*Insert a pair of tr to map vmm*/ - pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); - r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); - if (r < 0) - goto out; - vcpu->arch.vmm_tr_slot = r; - /*Insert a pairt of tr to map data of vm*/ - pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); - r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, - pte, KVM_VM_DATA_SHIFT); - if (r < 0) - goto out; - vcpu->arch.vm_tr_slot = r; - -#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) - if (kvm->arch.is_sn2) { - r = kvm_sn2_setup_mappings(vcpu); - if (r < 0) - goto out; - } -#endif - - r = 0; -out: - return r; -} - -static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = vcpu->kvm; - ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); - ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); -#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) - if (kvm->arch.is_sn2) - ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot); -#endif -} - -static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) -{ - unsigned long psr; - int r; - int cpu = smp_processor_id(); - - if (vcpu->arch.last_run_cpu != cpu || - per_cpu(last_vcpu, cpu) != vcpu) { - per_cpu(last_vcpu, cpu) = vcpu; - vcpu->arch.last_run_cpu = cpu; - kvm_flush_tlb_all(); - } - - vcpu->arch.host_rr6 = ia64_get_rr(RR6); - vti_set_rr6(vcpu->arch.vmm_rr); - local_irq_save(psr); - r = kvm_insert_vmm_mapping(vcpu); - local_irq_restore(psr); - return r; -} - -static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) -{ - kvm_purge_vmm_mapping(vcpu); - vti_set_rr6(vcpu->arch.host_rr6); -} - -static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - union context *host_ctx, *guest_ctx; - int r, idx; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - -again: - if (signal_pending(current)) { - r = -EINTR; - kvm_run->exit_reason = KVM_EXIT_INTR; - goto out; - } - - preempt_disable(); - local_irq_disable(); - - /*Get host and guest context with guest address space.*/ - host_ctx = kvm_get_host_context(vcpu); - guest_ctx = kvm_get_guest_context(vcpu); - - clear_bit(KVM_REQ_KICK, &vcpu->requests); - - r = kvm_vcpu_pre_transition(vcpu); - if (r < 0) - goto vcpu_run_fail; - - srcu_read_unlock(&vcpu->kvm->srcu, idx); - vcpu->mode = IN_GUEST_MODE; - kvm_guest_enter(); - - /* - * Transition to the guest - */ - kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); - - kvm_vcpu_post_transition(vcpu); - - vcpu->arch.launched = 1; - set_bit(KVM_REQ_KICK, &vcpu->requests); - local_irq_enable(); - - /* - * We must have an instruction between local_irq_enable() and - * kvm_guest_exit(), so the timer interrupt isn't delayed by - * the interrupt shadow. The stat.exits increment will do nicely. 
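/*
 * The table kvm_vti_exit_handlers[] and kvm_handle_exit() shown above use a
 * common C dispatch idiom: a designated-initializer array of function
 * pointers indexed by the exit reason, with a bounds-and-NULL check before
 * the indirect call so that unknown reasons fall back to userspace.  A
 * minimal, self-contained sketch of the same pattern (all names here are
 * illustrative, not the deleted KVM code):
 */
#include <stdio.h>

enum exit_reason { EXIT_FOO, EXIT_BAR };

typedef int (*exit_handler_t)(void);

static int handle_foo(void) { puts("foo"); return 1; }	/* 1: handled in kernel */
static int handle_bar(void) { puts("bar"); return 1; }

static exit_handler_t handlers[] = {
	[EXIT_FOO] = handle_foo,
	[EXIT_BAR] = handle_bar,
};

static int dispatch(unsigned int reason)
{
	if (reason < sizeof(handlers) / sizeof(handlers[0]) && handlers[reason])
		return handlers[reason]();
	return 0;	/* 0: unknown exit, punt to the caller (userspace) */
}

int main(void)
{
	return dispatch(EXIT_BAR) ? 0 : 1;
}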
- * But we need to prevent reordering, hence this barrier(): - */ - barrier(); - kvm_guest_exit(); - vcpu->mode = OUTSIDE_GUEST_MODE; - preempt_enable(); - - idx = srcu_read_lock(&vcpu->kvm->srcu); - - r = kvm_handle_exit(kvm_run, vcpu); - - if (r > 0) { - if (!need_resched()) - goto again; - } - -out: - srcu_read_unlock(&vcpu->kvm->srcu, idx); - if (r > 0) { - cond_resched(); - idx = srcu_read_lock(&vcpu->kvm->srcu); - goto again; - } - - return r; - -vcpu_run_fail: - local_irq_enable(); - preempt_enable(); - kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; - goto out; -} - -static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) -{ - struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); - - if (!vcpu->mmio_is_write) - memcpy(&p->data, vcpu->arch.mmio_data, 8); - p->state = STATE_IORESP_READY; -} - -int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - int r; - sigset_t sigsaved; - - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); - - if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { - kvm_vcpu_block(vcpu); - clear_bit(KVM_REQ_UNHALT, &vcpu->requests); - r = -EAGAIN; - goto out; - } - - if (vcpu->mmio_needed) { - memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8); - kvm_set_mmio_data(vcpu); - vcpu->mmio_read_completed = 1; - vcpu->mmio_needed = 0; - } - r = __vcpu_run(vcpu, kvm_run); -out: - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); - - return r; -} - -struct kvm *kvm_arch_alloc_vm(void) -{ - - struct kvm *kvm; - uint64_t vm_base; - - BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); - - vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); - - if (!vm_base) - return NULL; - - memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); - kvm = (struct kvm *)(vm_base + - offsetof(struct kvm_vm_data, kvm_vm_struct)); - kvm->arch.vm_base = vm_base; - printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); - - return kvm; -} - -struct kvm_ia64_io_range { - unsigned long start; - unsigned long size; - unsigned long type; -}; - -static const struct kvm_ia64_io_range io_ranges[] = { - {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, - {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, - {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, - {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, - {PIB_START, PIB_SIZE, GPFN_PIB}, -}; - -static void kvm_build_io_pmt(struct kvm *kvm) -{ - unsigned long i, j; - - /* Mark I/O ranges */ - for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); - i++) { - for (j = io_ranges[i].start; - j < io_ranges[i].start + io_ranges[i].size; - j += PAGE_SIZE) - kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, - io_ranges[i].type, 0); - } - -} - -/*Use unused rids to virtualize guest rid.*/ -#define GUEST_PHYSICAL_RR0 0x1739 -#define GUEST_PHYSICAL_RR4 0x2739 -#define VMM_INIT_RR 0x1660 - -int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) -{ - BUG_ON(!kvm); - - if (type) - return -EINVAL; - - kvm->arch.is_sn2 = ia64_platform_is("sn2"); - - kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; - kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; - kvm->arch.vmm_init_rr = VMM_INIT_RR; - - /* - *Fill P2M entries for MMIO/IO ranges - */ - kvm_build_io_pmt(kvm); - - INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); - - /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ - set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); - - return 0; -} - -static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, - struct kvm_irqchip *chip) -{ - int r; - - r = 0; - switch (chip->chip_id) 
{ - case KVM_IRQCHIP_IOAPIC: - r = kvm_get_ioapic(kvm, &chip->chip.ioapic); - break; - default: - r = -EINVAL; - break; - } - return r; -} - -static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) -{ - int r; - - r = 0; - switch (chip->chip_id) { - case KVM_IRQCHIP_IOAPIC: - r = kvm_set_ioapic(kvm, &chip->chip.ioapic); - break; - default: - r = -EINVAL; - break; - } - return r; -} - -#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x - -int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - int i; - - for (i = 0; i < 16; i++) { - vpd->vgr[i] = regs->vpd.vgr[i]; - vpd->vbgr[i] = regs->vpd.vbgr[i]; - } - for (i = 0; i < 128; i++) - vpd->vcr[i] = regs->vpd.vcr[i]; - vpd->vhpi = regs->vpd.vhpi; - vpd->vnat = regs->vpd.vnat; - vpd->vbnat = regs->vpd.vbnat; - vpd->vpsr = regs->vpd.vpsr; - - vpd->vpr = regs->vpd.vpr; - - memcpy(&vcpu->arch.guest, ®s->saved_guest, sizeof(union context)); - - RESTORE_REGS(mp_state); - RESTORE_REGS(vmm_rr); - memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); - memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); - RESTORE_REGS(itr_regions); - RESTORE_REGS(dtr_regions); - RESTORE_REGS(tc_regions); - RESTORE_REGS(irq_check); - RESTORE_REGS(itc_check); - RESTORE_REGS(timer_check); - RESTORE_REGS(timer_pending); - RESTORE_REGS(last_itc); - for (i = 0; i < 8; i++) { - vcpu->arch.vrr[i] = regs->vrr[i]; - vcpu->arch.ibr[i] = regs->ibr[i]; - vcpu->arch.dbr[i] = regs->dbr[i]; - } - for (i = 0; i < 4; i++) - vcpu->arch.insvc[i] = regs->insvc[i]; - RESTORE_REGS(xtp); - RESTORE_REGS(metaphysical_rr0); - RESTORE_REGS(metaphysical_rr4); - RESTORE_REGS(metaphysical_saved_rr0); - RESTORE_REGS(metaphysical_saved_rr4); - RESTORE_REGS(fp_psr); - RESTORE_REGS(saved_gp); - - vcpu->arch.irq_new_pending = 1; - vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); - set_bit(KVM_REQ_RESUME, &vcpu->requests); - - return 0; -} - -int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, - bool line_status) -{ - if (!irqchip_in_kernel(kvm)) - return -ENXIO; - - irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, - irq_event->irq, irq_event->level, - line_status); - return 0; -} - -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - struct kvm *kvm = filp->private_data; - void __user *argp = (void __user *)arg; - int r = -ENOTTY; - - switch (ioctl) { - case KVM_CREATE_IRQCHIP: - r = -EFAULT; - r = kvm_ioapic_init(kvm); - if (r) - goto out; - r = kvm_setup_default_irq_routing(kvm); - if (r) { - mutex_lock(&kvm->slots_lock); - kvm_ioapic_destroy(kvm); - mutex_unlock(&kvm->slots_lock); - goto out; - } - break; - case KVM_GET_IRQCHIP: { - /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ - struct kvm_irqchip chip; - - r = -EFAULT; - if (copy_from_user(&chip, argp, sizeof chip)) - goto out; - r = -ENXIO; - if (!irqchip_in_kernel(kvm)) - goto out; - r = kvm_vm_ioctl_get_irqchip(kvm, &chip); - if (r) - goto out; - r = -EFAULT; - if (copy_to_user(argp, &chip, sizeof chip)) - goto out; - r = 0; - break; - } - case KVM_SET_IRQCHIP: { - /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ - struct kvm_irqchip chip; - - r = -EFAULT; - if (copy_from_user(&chip, argp, sizeof chip)) - goto out; - r = -ENXIO; - if (!irqchip_in_kernel(kvm)) - goto out; - r = kvm_vm_ioctl_set_irqchip(kvm, &chip); - if (r) - goto out; - r = 0; - break; - } - default: - ; - } -out: - return r; -} - -int 
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; - -} -int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, - struct kvm_translation *tr) -{ - - return -EINVAL; -} - -static int kvm_alloc_vmm_area(void) -{ - if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { - kvm_vmm_base = __get_free_pages(GFP_KERNEL, - get_order(KVM_VMM_SIZE)); - if (!kvm_vmm_base) - return -ENOMEM; - - memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); - kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; - - printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", - kvm_vmm_base, kvm_vm_buffer); - } - - return 0; -} - -static void kvm_free_vmm_area(void) -{ - if (kvm_vmm_base) { - /*Zero this area before free to avoid bits leak!!*/ - memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); - free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); - kvm_vmm_base = 0; - kvm_vm_buffer = 0; - kvm_vsa_base = 0; - } -} - -static int vti_init_vpd(struct kvm_vcpu *vcpu) -{ - int i; - union cpuid3_t cpuid3; - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - - if (IS_ERR(vpd)) - return PTR_ERR(vpd); - - /* CPUID init */ - for (i = 0; i < 5; i++) - vpd->vcpuid[i] = ia64_get_cpuid(i); - - /* Limit the CPUID number to 5 */ - cpuid3.value = vpd->vcpuid[3]; - cpuid3.number = 4; /* 5 - 1 */ - vpd->vcpuid[3] = cpuid3.value; - - /*Set vac and vdc fields*/ - vpd->vac.a_from_int_cr = 1; - vpd->vac.a_to_int_cr = 1; - vpd->vac.a_from_psr = 1; - vpd->vac.a_from_cpuid = 1; - vpd->vac.a_cover = 1; - vpd->vac.a_bsw = 1; - vpd->vac.a_int = 1; - vpd->vdc.d_vmsw = 1; - - /*Set virtual buffer*/ - vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; - - return 0; -} - -static int vti_create_vp(struct kvm_vcpu *vcpu) -{ - long ret; - struct vpd *vpd = vcpu->arch.vpd; - unsigned long vmm_ivt; - - vmm_ivt = kvm_vmm_info->vmm_ivt; - - printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); - - ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); - - if (ret) { - printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); - return -EINVAL; - } - return 0; -} - -static void init_ptce_info(struct kvm_vcpu *vcpu) -{ - ia64_ptce_info_t ptce = {0}; - - ia64_get_ptce(&ptce); - vcpu->arch.ptce_base = ptce.base; - vcpu->arch.ptce_count[0] = ptce.count[0]; - vcpu->arch.ptce_count[1] = ptce.count[1]; - vcpu->arch.ptce_stride[0] = ptce.stride[0]; - vcpu->arch.ptce_stride[1] = ptce.stride[1]; -} - -static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) -{ - struct hrtimer *p_ht = &vcpu->arch.hlt_timer; - - if (hrtimer_cancel(p_ht)) - hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); -} - -static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) -{ - struct kvm_vcpu *vcpu; - wait_queue_head_t *q; - - vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); - q = &vcpu->wq; - - if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) - goto out; - - if (waitqueue_active(q)) - wake_up_interruptible(q); - -out: - vcpu->arch.timer_fired = 1; - vcpu->arch.timer_check = 1; - return HRTIMER_NORESTART; -} - -#define PALE_RESET_ENTRY 0x80000000ffffffb0UL - -bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) -{ - return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); -} - -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - struct kvm_vcpu *v; - int r; - int i; - long itc_offset; - struct kvm *kvm = vcpu->kvm; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - union context *p_ctx = &vcpu->arch.guest; - struct 
kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); - - /*Init vcpu context for first run.*/ - if (IS_ERR(vmm_vcpu)) - return PTR_ERR(vmm_vcpu); - - if (kvm_vcpu_is_bsp(vcpu)) { - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - - /*Set entry address for first run.*/ - regs->cr_iip = PALE_RESET_ENTRY; - - /*Initialize itc offset for vcpus*/ - itc_offset = 0UL - kvm_get_itc(vcpu); - for (i = 0; i < KVM_MAX_VCPUS; i++) { - v = (struct kvm_vcpu *)((char *)vcpu + - sizeof(struct kvm_vcpu_data) * i); - v->arch.itc_offset = itc_offset; - v->arch.last_itc = 0; - } - } else - vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; - - r = -ENOMEM; - vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); - if (!vcpu->arch.apic) - goto out; - vcpu->arch.apic->vcpu = vcpu; - - p_ctx->gr[1] = 0; - p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); - p_ctx->gr[13] = (unsigned long)vmm_vcpu; - p_ctx->psr = 0x1008522000UL; - p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ - p_ctx->caller_unat = 0; - p_ctx->pr = 0x0; - p_ctx->ar[36] = 0x0; /*unat*/ - p_ctx->ar[19] = 0x0; /*rnat*/ - p_ctx->ar[18] = (unsigned long)vmm_vcpu + - ((sizeof(struct kvm_vcpu)+15) & ~15); - p_ctx->ar[64] = 0x0; /*pfs*/ - p_ctx->cr[0] = 0x7e04UL; - p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; - p_ctx->cr[8] = 0x3c; - - /*Initialize region register*/ - p_ctx->rr[0] = 0x30; - p_ctx->rr[1] = 0x30; - p_ctx->rr[2] = 0x30; - p_ctx->rr[3] = 0x30; - p_ctx->rr[4] = 0x30; - p_ctx->rr[5] = 0x30; - p_ctx->rr[7] = 0x30; - - /*Initialize branch register 0*/ - p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; - - vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; - vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; - vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; - - hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - vcpu->arch.hlt_timer.function = hlt_timer_fn; - - vcpu->arch.last_run_cpu = -1; - vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); - vcpu->arch.vsa_base = kvm_vsa_base; - vcpu->arch.__gp = kvm_vmm_gp; - vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); - vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); - vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); - init_ptce_info(vcpu); - - r = 0; -out: - return r; -} - -static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) -{ - unsigned long psr; - int r; - - local_irq_save(psr); - r = kvm_insert_vmm_mapping(vcpu); - local_irq_restore(psr); - if (r) - goto fail; - r = kvm_vcpu_init(vcpu, vcpu->kvm, id); - if (r) - goto fail; - - r = vti_init_vpd(vcpu); - if (r) { - printk(KERN_DEBUG"kvm: vpd init error!!\n"); - goto uninit; - } - - r = vti_create_vp(vcpu); - if (r) - goto uninit; - - kvm_purge_vmm_mapping(vcpu); - - return 0; -uninit: - kvm_vcpu_uninit(vcpu); -fail: - return r; -} - -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, - unsigned int id) -{ - struct kvm_vcpu *vcpu; - unsigned long vm_base = kvm->arch.vm_base; - int r; - int cpu; - - BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); - - r = -EINVAL; - if (id >= KVM_MAX_VCPUS) { - printk(KERN_ERR"kvm: Can't configure vcpus > %ld", - KVM_MAX_VCPUS); - goto fail; - } - - r = -ENOMEM; - if (!vm_base) { - printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); - goto fail; - } - vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, - vcpu_data[id].vcpu_struct)); - vcpu->kvm = kvm; - - cpu = get_cpu(); - r = vti_vcpu_setup(vcpu, id); - put_cpu(); - - if (r) { - printk(KERN_DEBUG"kvm: vcpu_setup 
error!!\n"); - goto fail; - } - - return vcpu; -fail: - return ERR_PTR(r); -} - -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - -int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) -{ - return 0; -} - -int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg) -{ - return -EINVAL; -} - -void kvm_arch_free_vm(struct kvm *kvm) -{ - unsigned long vm_base = kvm->arch.vm_base; - - if (vm_base) { - memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); - free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); - } - -} - -static void kvm_release_vm_pages(struct kvm *kvm) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - int j; - - slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { - for (j = 0; j < memslot->npages; j++) { - if (memslot->rmap[j]) - put_page((struct page *)memslot->rmap[j]); - } - } -} - -void kvm_arch_destroy_vm(struct kvm *kvm) -{ - kvm_iommu_unmap_guest(kvm); - kvm_free_all_assigned_devices(kvm); - kfree(kvm->arch.vioapic); - kvm_release_vm_pages(kvm); -} - -void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) -{ - if (cpu != vcpu->cpu) { - vcpu->cpu = cpu; - if (vcpu->arch.ht_active) - kvm_migrate_hlt_timer(vcpu); - } -} - -#define SAVE_REGS(_x) regs->_x = vcpu->arch._x - -int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - int i; - - vcpu_load(vcpu); - - for (i = 0; i < 16; i++) { - regs->vpd.vgr[i] = vpd->vgr[i]; - regs->vpd.vbgr[i] = vpd->vbgr[i]; - } - for (i = 0; i < 128; i++) - regs->vpd.vcr[i] = vpd->vcr[i]; - regs->vpd.vhpi = vpd->vhpi; - regs->vpd.vnat = vpd->vnat; - regs->vpd.vbnat = vpd->vbnat; - regs->vpd.vpsr = vpd->vpsr; - regs->vpd.vpr = vpd->vpr; - - memcpy(®s->saved_guest, &vcpu->arch.guest, sizeof(union context)); - - SAVE_REGS(mp_state); - SAVE_REGS(vmm_rr); - memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); - memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); - SAVE_REGS(itr_regions); - SAVE_REGS(dtr_regions); - SAVE_REGS(tc_regions); - SAVE_REGS(irq_check); - SAVE_REGS(itc_check); - SAVE_REGS(timer_check); - SAVE_REGS(timer_pending); - SAVE_REGS(last_itc); - for (i = 0; i < 8; i++) { - regs->vrr[i] = vcpu->arch.vrr[i]; - regs->ibr[i] = vcpu->arch.ibr[i]; - regs->dbr[i] = vcpu->arch.dbr[i]; - } - for (i = 0; i < 4; i++) - regs->insvc[i] = vcpu->arch.insvc[i]; - regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu); - SAVE_REGS(xtp); - SAVE_REGS(metaphysical_rr0); - SAVE_REGS(metaphysical_rr4); - SAVE_REGS(metaphysical_saved_rr0); - SAVE_REGS(metaphysical_saved_rr4); - SAVE_REGS(fp_psr); - SAVE_REGS(saved_gp); - - vcpu_put(vcpu); - return 0; -} - -int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, - struct kvm_ia64_vcpu_stack *stack) -{ - memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); - return 0; -} - -int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, - struct kvm_ia64_vcpu_stack *stack) -{ - memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), - sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); - - vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; - return 0; -} - -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - - hrtimer_cancel(&vcpu->arch.hlt_timer); - 
kfree(vcpu->arch.apic); -} - -long kvm_arch_vcpu_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - struct kvm_vcpu *vcpu = filp->private_data; - void __user *argp = (void __user *)arg; - struct kvm_ia64_vcpu_stack *stack = NULL; - long r; - - switch (ioctl) { - case KVM_IA64_VCPU_GET_STACK: { - struct kvm_ia64_vcpu_stack __user *user_stack; - void __user *first_p = argp; - - r = -EFAULT; - if (copy_from_user(&user_stack, first_p, sizeof(void *))) - goto out; - - if (!access_ok(VERIFY_WRITE, user_stack, - sizeof(struct kvm_ia64_vcpu_stack))) { - printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " - "Illegal user destination address for stack\n"); - goto out; - } - stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); - if (!stack) { - r = -ENOMEM; - goto out; - } - - r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); - if (r) - goto out; - - if (copy_to_user(user_stack, stack, - sizeof(struct kvm_ia64_vcpu_stack))) { - r = -EFAULT; - goto out; - } - - break; - } - case KVM_IA64_VCPU_SET_STACK: { - struct kvm_ia64_vcpu_stack __user *user_stack; - void __user *first_p = argp; - - r = -EFAULT; - if (copy_from_user(&user_stack, first_p, sizeof(void *))) - goto out; - - if (!access_ok(VERIFY_READ, user_stack, - sizeof(struct kvm_ia64_vcpu_stack))) { - printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " - "Illegal user address for stack\n"); - goto out; - } - stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); - if (!stack) { - r = -ENOMEM; - goto out; - } - if (copy_from_user(stack, user_stack, - sizeof(struct kvm_ia64_vcpu_stack))) - goto out; - - r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); - break; - } - - default: - r = -EINVAL; - } - -out: - kfree(stack); - return r; -} - -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) -{ - return VM_FAULT_SIGBUS; -} - -int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, - unsigned long npages) -{ - return 0; -} - -int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) -{ - unsigned long i; - unsigned long pfn; - int npages = memslot->npages; - unsigned long base_gfn = memslot->base_gfn; - - if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) - return -ENOMEM; - - for (i = 0; i < npages; i++) { - pfn = gfn_to_pfn(kvm, base_gfn + i); - if (!kvm_is_mmio_pfn(pfn)) { - kvm_set_pmt_entry(kvm, base_gfn + i, - pfn << PAGE_SHIFT, - _PAGE_AR_RWX | _PAGE_MA_WB); - memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); - } else { - kvm_set_pmt_entry(kvm, base_gfn + i, - GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), - _PAGE_MA_UC); - memslot->rmap[i] = 0; - } - } - - return 0; -} - -void kvm_arch_flush_shadow_all(struct kvm *kvm) -{ - kvm_flush_remote_tlbs(kvm); -} - -void kvm_arch_flush_shadow_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ - kvm_arch_flush_shadow_all(); -} - -long kvm_arch_dev_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - return -EINVAL; -} - -void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) -{ - kvm_vcpu_uninit(vcpu); -} - -static int vti_cpu_has_kvm_support(void) -{ - long avail = 1, status = 1, control = 1; - long ret; - - ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); - if (ret) - goto out; - - if (!(avail & PAL_PROC_VM_BIT)) - goto out; - - printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); - - ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); - if (ret) - goto out; - printk(KERN_DEBUG"kvm: VM Buffer 
Size:0x%lx\n", kvm_vm_buffer_size); - - if (!(vp_env_info & VP_OPCODE)) { - printk(KERN_WARNING"kvm: No opcode ability on hardware, " - "vm_env_info:0x%lx\n", vp_env_info); - } - - return 1; -out: - return 0; -} - - -/* - * On SN2, the ITC isn't stable, so copy in fast path code to use the - * SN2 RTC, replacing the ITC based default verion. - */ -static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info, - struct module *module) -{ - unsigned long new_ar, new_ar_sn2; - unsigned long module_base; - - if (!ia64_platform_is("sn2")) - return; - - module_base = (unsigned long)module->module_core; - - new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base; - new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base; - - printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC " - "as source\n"); - - /* - * Copy the SN2 version of mov_ar into place. They are both - * the same size, so 6 bundles is sufficient (6 * 0x10). - */ - memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60); -} - -static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, - struct module *module) -{ - unsigned long module_base; - unsigned long vmm_size; - - unsigned long vmm_offset, func_offset, fdesc_offset; - struct fdesc *p_fdesc; - - BUG_ON(!module); - - if (!kvm_vmm_base) { - printk("kvm: kvm area hasn't been initialized yet!!\n"); - return -EFAULT; - } - - /*Calculate new position of relocated vmm module.*/ - module_base = (unsigned long)module->module_core; - vmm_size = module->core_size; - if (unlikely(vmm_size > KVM_VMM_SIZE)) - return -EFAULT; - - memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); - kvm_patch_vmm(vmm_info, module); - kvm_flush_icache(kvm_vmm_base, vmm_size); - - /*Recalculate kvm_vmm_info based on new VMM*/ - vmm_offset = vmm_info->vmm_ivt - module_base; - kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; - printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", - kvm_vmm_info->vmm_ivt); - - fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; - kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + - fdesc_offset); - func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; - p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); - p_fdesc->ip = KVM_VMM_BASE + func_offset; - p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); - - printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", - KVM_VMM_BASE+func_offset); - - fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; - kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + - fdesc_offset); - func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; - p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); - p_fdesc->ip = KVM_VMM_BASE + func_offset; - p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); - - kvm_vmm_gp = p_fdesc->gp; - - printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", - kvm_vmm_info->vmm_entry); - printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", - KVM_VMM_BASE + func_offset); - - return 0; -} - -int kvm_arch_init(void *opaque) -{ - int r; - struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; - - if (!vti_cpu_has_kvm_support()) { - printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); - r = -EOPNOTSUPP; - goto out; - } - - if (kvm_vmm_info) { - printk(KERN_ERR "kvm: Already loaded VMM module!\n"); - r = -EEXIST; - goto out; - } - - r = -ENOMEM; - kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); - if (!kvm_vmm_info) - goto out; - - if (kvm_alloc_vmm_area()) - 
goto out_free0; - - r = kvm_relocate_vmm(vmm_info, vmm_info->module); - if (r) - goto out_free1; - - return 0; - -out_free1: - kvm_free_vmm_area(); -out_free0: - kfree(kvm_vmm_info); -out: - return r; -} - -void kvm_arch_exit(void) -{ - kvm_free_vmm_area(); - kfree(kvm_vmm_info); - kvm_vmm_info = NULL; -} - -static void kvm_ia64_sync_dirty_log(struct kvm *kvm, - struct kvm_memory_slot *memslot) -{ - int i; - long base; - unsigned long n; - unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + - offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); - - n = kvm_dirty_bitmap_bytes(memslot); - base = memslot->base_gfn / BITS_PER_LONG; - - spin_lock(&kvm->arch.dirty_log_lock); - for (i = 0; i < n/sizeof(long); ++i) { - memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; - dirty_bitmap[base + i] = 0; - } - spin_unlock(&kvm->arch.dirty_log_lock); -} - -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, - struct kvm_dirty_log *log) -{ - int r; - unsigned long n; - struct kvm_memory_slot *memslot; - int is_dirty = 0; - - mutex_lock(&kvm->slots_lock); - - r = -EINVAL; - if (log->slot >= KVM_USER_MEM_SLOTS) - goto out; - - memslot = id_to_memslot(kvm->memslots, log->slot); - r = -ENOENT; - if (!memslot->dirty_bitmap) - goto out; - - kvm_ia64_sync_dirty_log(kvm, memslot); - r = kvm_get_dirty_log(kvm, log, &is_dirty); - if (r) - goto out; - - /* If nothing is dirty, don't bother messing with page tables. */ - if (is_dirty) { - kvm_flush_remote_tlbs(kvm); - n = kvm_dirty_bitmap_bytes(memslot); - memset(memslot->dirty_bitmap, 0, n); - } - r = 0; -out: - mutex_unlock(&kvm->slots_lock); - return r; -} - -int kvm_arch_hardware_setup(void) -{ - return 0; -} - -int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) -{ - return __apic_accept_irq(vcpu, irq->vector); -} - -int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) -{ - return apic->vcpu->vcpu_id == dest; -} - -int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) -{ - return 0; -} - -int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) -{ - return vcpu1->arch.xtp - vcpu2->arch.xtp; -} - -int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, - int short_hand, int dest, int dest_mode) -{ - struct kvm_lapic *target = vcpu->arch.apic; - return (dest_mode == 0) ? 
- kvm_apic_match_physical_addr(target, dest) : - kvm_apic_match_logical_addr(target, dest); -} - -static int find_highest_bits(int *dat) -{ - u32 bits, bitnum; - int i; - - /* loop for all 256 bits */ - for (i = 7; i >= 0 ; i--) { - bits = dat[i]; - if (bits) { - bitnum = fls(bits); - return i * 32 + bitnum - 1; - } - } - - return -1; -} - -int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) -{ - struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); - - if (vpd->irr[0] & (1UL << NMI_VECTOR)) - return NMI_VECTOR; - if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) - return ExtINT_VECTOR; - - return find_highest_bits((int *)&vpd->irr[0]); -} - -int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.timer_fired; -} - -int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) -{ - return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || - (kvm_highest_pending_irq(vcpu) != -1); -} - -int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) -{ - return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)); -} - -int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) -{ - mp_state->mp_state = vcpu->arch.mp_state; - return 0; -} - -static int vcpu_reset(struct kvm_vcpu *vcpu) -{ - int r; - long psr; - local_irq_save(psr); - r = kvm_insert_vmm_mapping(vcpu); - local_irq_restore(psr); - if (r) - goto fail; - - vcpu->arch.launched = 0; - kvm_arch_vcpu_uninit(vcpu); - r = kvm_arch_vcpu_init(vcpu); - if (r) - goto fail; - - kvm_purge_vmm_mapping(vcpu); - r = 0; -fail: - return r; -} - -int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) -{ - int r = 0; - - vcpu->arch.mp_state = mp_state->mp_state; - if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) - r = vcpu_reset(vcpu); - return r; -} diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c deleted file mode 100644 index cb548ee9fcae..000000000000 --- a/arch/ia64/kvm/kvm_fw.c +++ /dev/null @@ -1,674 +0,0 @@ -/* - * PAL/SAL call delegation - * - * Copyright (c) 2004 Li Susie - * Copyright (c) 2005 Yu Ke - * Copyright (c) 2007 Xiantao Zhang - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - */ - -#include -#include -#include -#include -#include - -#include "vti.h" -#include "misc.h" - -#include -#include -#include - -/* - * Handy macros to make sure that the PAL return values start out - * as something meaningful. 
- */ -#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \ - { \ - x.status = PAL_STATUS_UNIMPLEMENTED; \ - x.v0 = 0; \ - x.v1 = 0; \ - x.v2 = 0; \ - } - -#define INIT_PAL_STATUS_SUCCESS(x) \ - { \ - x.status = PAL_STATUS_SUCCESS; \ - x.v0 = 0; \ - x.v1 = 0; \ - x.v2 = 0; \ - } - -static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu, - u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) { - struct exit_ctl_data *p; - - if (vcpu) { - p = &vcpu->arch.exit_data; - if (p->exit_reason == EXIT_REASON_PAL_CALL) { - *gr28 = p->u.pal_data.gr28; - *gr29 = p->u.pal_data.gr29; - *gr30 = p->u.pal_data.gr30; - *gr31 = p->u.pal_data.gr31; - return ; - } - } - printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n"); -} - -static void set_pal_result(struct kvm_vcpu *vcpu, - struct ia64_pal_retval result) { - - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - if (p->exit_reason == EXIT_REASON_PAL_CALL) { - p->u.pal_data.ret = result; - return ; - } - INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret); -} - -static void set_sal_result(struct kvm_vcpu *vcpu, - struct sal_ret_values result) { - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - p->u.sal_data.ret = result; - return ; - } - printk(KERN_WARNING"Failed to set sal result!!\n"); -} - -struct cache_flush_args { - u64 cache_type; - u64 operation; - u64 progress; - long status; -}; - -cpumask_t cpu_cache_coherent_map; - -static void remote_pal_cache_flush(void *data) -{ - struct cache_flush_args *args = data; - long status; - u64 progress = args->progress; - - status = ia64_pal_cache_flush(args->cache_type, args->operation, - &progress, NULL); - if (status != 0) - args->status = status; -} - -static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu) -{ - u64 gr28, gr29, gr30, gr31; - struct ia64_pal_retval result = {0, 0, 0, 0}; - struct cache_flush_args args = {0, 0, 0, 0}; - long psr; - - gr28 = gr29 = gr30 = gr31 = 0; - kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31); - - if (gr31 != 0) - printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu); - - /* Always call Host Pal in int=1 */ - gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS; - args.cache_type = gr29; - args.operation = gr30; - smp_call_function(remote_pal_cache_flush, - (void *)&args, 1); - if (args.status != 0) - printk(KERN_ERR"pal_cache_flush error!," - "status:0x%lx\n", args.status); - /* - * Call Host PAL cache flush - * Clear psr.ic when call PAL_CACHE_FLUSH - */ - local_irq_save(psr); - result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1, - &result.v0); - local_irq_restore(psr); - if (result.status != 0) - printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld" - "in1:%lx,in2:%lx\n", - vcpu, result.status, gr29, gr30); - -#if 0 - if (gr29 == PAL_CACHE_TYPE_COHERENT) { - cpus_setall(vcpu->arch.cache_coherent_map); - cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map); - cpus_setall(cpu_cache_coherent_map); - cpu_clear(vcpu->cpu, cpu_cache_coherent_map); - } -#endif - return result; -} - -struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result; - - PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0); - return result; -} - -static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result; - - PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0); - - /* - * PAL_FREQ_BASE may not be implemented in some platforms, - * call SAL instead. 
- */ - if (result.v0 == 0) { - result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, - &result.v0, - &result.v1); - result.v2 = 0; - } - - return result; -} - -/* - * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2 - * RTC is used instead. This function patches the ratios from SAL - * to match the RTC before providing them to the guest. - */ -static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result) -{ - struct pal_freq_ratio *ratio; - unsigned long sal_freq, sal_drift, factor; - - result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, - &sal_freq, &sal_drift); - ratio = (struct pal_freq_ratio *)&result->v2; - factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) / - sn_rtc_cycles_per_second; - - ratio->num = 3; - ratio->den = factor; -} - -static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result; - - PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0); - - if (vcpu->kvm->arch.is_sn2) - sn2_patch_itc_freq_ratios(&result); - - return result; -} - -static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result; - - INIT_PAL_STATUS_UNIMPLEMENTED(result); - return result; -} - -static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result; - - INIT_PAL_STATUS_SUCCESS(result); - return result; -} - -static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result = {0, 0, 0, 0}; - long in0, in1, in2, in3; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - result.status = ia64_pal_proc_get_features(&result.v0, &result.v1, - &result.v2, in2); - - return result; -} - -static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu) -{ - - struct ia64_pal_retval result = {0, 0, 0, 0}; - long in0, in1, in2, in3; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - result.status = ia64_pal_register_info(in1, &result.v1, &result.v2); - - return result; -} - -static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) -{ - - pal_cache_config_info_t ci; - long status; - unsigned long in0, in1, in2, in3, r9, r10; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - status = ia64_pal_cache_config_info(in1, in2, &ci); - r9 = ci.pcci_info_1.pcci1_data; - r10 = ci.pcci_info_2.pcci2_data; - return ((struct ia64_pal_retval){status, r9, r10, 0}); -} - -#define GUEST_IMPL_VA_MSB 59 -#define GUEST_RID_BITS 18 - -static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu) -{ - - pal_vm_info_1_u_t vminfo1; - pal_vm_info_2_u_t vminfo2; - struct ia64_pal_retval result; - - PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0); - if (!result.status) { - vminfo1.pvi1_val = result.v0; - vminfo1.pal_vm_info_1_s.max_itr_entry = 8; - vminfo1.pal_vm_info_1_s.max_dtr_entry = 8; - result.v0 = vminfo1.pvi1_val; - vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB; - vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS; - result.v1 = vminfo2.pvi2_val; - } - - return result; -} - -static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result; - unsigned long in0, in1, in2, in3; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - - result.status = ia64_pal_vm_info(in1, in2, - (pal_tc_info_u_t *)&result.v1, &result.v2); - - return result; -} - -static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu) -{ - u64 index = 0; - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - if (p->exit_reason == EXIT_REASON_PAL_CALL) - index = 
p->u.pal_data.gr28; - - return index; -} - -static void prepare_for_halt(struct kvm_vcpu *vcpu) -{ - vcpu->arch.timer_pending = 1; - vcpu->arch.timer_fired = 0; -} - -static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu) -{ - long status; - unsigned long in0, in1, in2, in3, r9; - unsigned long pm_buffer[16]; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - status = ia64_pal_perf_mon_info(pm_buffer, - (pal_perf_mon_info_u_t *) &r9); - if (status != 0) { - printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status); - } else { - if (in1) - memcpy((void *)in1, pm_buffer, sizeof(pm_buffer)); - else { - status = PAL_STATUS_EINVAL; - printk(KERN_WARNING"Invalid parameters " - "for PAL call:0x%lx!\n", in0); - } - } - return (struct ia64_pal_retval){status, r9, 0, 0}; -} - -static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu) -{ - unsigned long in0, in1, in2, in3; - long status; - unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32) - | (1UL << 61) | (1UL << 60); - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - if (in1) { - memcpy((void *)in1, &res, sizeof(res)); - status = 0; - } else{ - status = PAL_STATUS_EINVAL; - printk(KERN_WARNING"Invalid parameters " - "for PAL call:0x%lx!\n", in0); - } - - return (struct ia64_pal_retval){status, 0, 0, 0}; -} - -static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu) -{ - unsigned long r9; - long status; - - status = ia64_pal_mem_attrib(&r9); - - return (struct ia64_pal_retval){status, r9, 0, 0}; -} - -static void remote_pal_prefetch_visibility(void *v) -{ - s64 trans_type = (s64)v; - ia64_pal_prefetch_visibility(trans_type); -} - -static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result = {0, 0, 0, 0}; - unsigned long in0, in1, in2, in3; - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - result.status = ia64_pal_prefetch_visibility(in1); - if (result.status == 0) { - /* Must be performed on all remote processors - in the coherence domain. 
*/ - smp_call_function(remote_pal_prefetch_visibility, - (void *)in1, 1); - /* Unnecessary on remote processor for other vcpus!*/ - result.status = 1; - } - return result; -} - -static void remote_pal_mc_drain(void *v) -{ - ia64_pal_mc_drain(); -} - -static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu) -{ - struct ia64_pal_retval result = {0, 0, 0, 0}; - unsigned long in0, in1, in2, in3; - - kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); - - if (in1 == 0 && in2) { - char brand_info[128]; - result.status = ia64_pal_get_brand_info(brand_info); - if (result.status == PAL_STATUS_SUCCESS) - memcpy((void *)in2, brand_info, 128); - } else { - result.status = PAL_STATUS_REQUIRES_MEMORY; - printk(KERN_WARNING"Invalid parameters for " - "PAL call:0x%lx!\n", in0); - } - - return result; -} - -int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - - u64 gr28; - struct ia64_pal_retval result; - int ret = 1; - - gr28 = kvm_get_pal_call_index(vcpu); - switch (gr28) { - case PAL_CACHE_FLUSH: - result = pal_cache_flush(vcpu); - break; - case PAL_MEM_ATTRIB: - result = pal_mem_attrib(vcpu); - break; - case PAL_CACHE_SUMMARY: - result = pal_cache_summary(vcpu); - break; - case PAL_PERF_MON_INFO: - result = pal_perf_mon_info(vcpu); - break; - case PAL_HALT_INFO: - result = pal_halt_info(vcpu); - break; - case PAL_HALT_LIGHT: - { - INIT_PAL_STATUS_SUCCESS(result); - prepare_for_halt(vcpu); - if (kvm_highest_pending_irq(vcpu) == -1) - ret = kvm_emulate_halt(vcpu); - } - break; - - case PAL_PREFETCH_VISIBILITY: - result = pal_prefetch_visibility(vcpu); - break; - case PAL_MC_DRAIN: - result.status = ia64_pal_mc_drain(); - /* FIXME: All vcpus likely call PAL_MC_DRAIN. - That causes the congestion. */ - smp_call_function(remote_pal_mc_drain, NULL, 1); - break; - - case PAL_FREQ_RATIOS: - result = pal_freq_ratios(vcpu); - break; - - case PAL_FREQ_BASE: - result = pal_freq_base(vcpu); - break; - - case PAL_LOGICAL_TO_PHYSICAL : - result = pal_logical_to_physica(vcpu); - break; - - case PAL_VM_SUMMARY : - result = pal_vm_summary(vcpu); - break; - - case PAL_VM_INFO : - result = pal_vm_info(vcpu); - break; - case PAL_PLATFORM_ADDR : - result = pal_platform_addr(vcpu); - break; - case PAL_CACHE_INFO: - result = pal_cache_info(vcpu); - break; - case PAL_PTCE_INFO: - INIT_PAL_STATUS_SUCCESS(result); - result.v1 = (1L << 32) | 1L; - break; - case PAL_REGISTER_INFO: - result = pal_register_info(vcpu); - break; - case PAL_VM_PAGE_SIZE: - result.status = ia64_pal_vm_page_size(&result.v0, - &result.v1); - break; - case PAL_RSE_INFO: - result.status = ia64_pal_rse_info(&result.v0, - (pal_hints_u_t *)&result.v1); - break; - case PAL_PROC_GET_FEATURES: - result = pal_proc_get_features(vcpu); - break; - case PAL_DEBUG_INFO: - result.status = ia64_pal_debug_info(&result.v0, - &result.v1); - break; - case PAL_VERSION: - result.status = ia64_pal_version( - (pal_version_u_t *)&result.v0, - (pal_version_u_t *)&result.v1); - break; - case PAL_FIXED_ADDR: - result.status = PAL_STATUS_SUCCESS; - result.v0 = vcpu->vcpu_id; - break; - case PAL_BRAND_INFO: - result = pal_get_brand_info(vcpu); - break; - case PAL_GET_PSTATE: - case PAL_CACHE_SHARED_INFO: - INIT_PAL_STATUS_UNIMPLEMENTED(result); - break; - default: - INIT_PAL_STATUS_UNIMPLEMENTED(result); - printk(KERN_WARNING"kvm: Unsupported pal call," - " index:0x%lx\n", gr28); - } - set_pal_result(vcpu, result); - return ret; -} - -static struct sal_ret_values sal_emulator(struct kvm *kvm, - long index, unsigned long in1, - unsigned long in2, unsigned 
long in3, - unsigned long in4, unsigned long in5, - unsigned long in6, unsigned long in7) -{ - unsigned long r9 = 0; - unsigned long r10 = 0; - long r11 = 0; - long status; - - status = 0; - switch (index) { - case SAL_FREQ_BASE: - status = ia64_sal_freq_base(in1, &r9, &r10); - break; - case SAL_PCI_CONFIG_READ: - printk(KERN_WARNING"kvm: Not allowed to call here!" - " SAL_PCI_CONFIG_READ\n"); - break; - case SAL_PCI_CONFIG_WRITE: - printk(KERN_WARNING"kvm: Not allowed to call here!" - " SAL_PCI_CONFIG_WRITE\n"); - break; - case SAL_SET_VECTORS: - if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) { - if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) { - status = -2; - } else { - kvm->arch.rdv_sal_data.boot_ip = in2; - kvm->arch.rdv_sal_data.boot_gp = in3; - } - printk("Rendvous called! iip:%lx\n\n", in2); - } else - printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu." - "ignored...\n", in1); - break; - case SAL_GET_STATE_INFO: - /* No more info. */ - status = -5; - r9 = 0; - break; - case SAL_GET_STATE_INFO_SIZE: - /* Return a dummy size. */ - status = 0; - r9 = 128; - break; - case SAL_CLEAR_STATE_INFO: - /* Noop. */ - break; - case SAL_MC_RENDEZ: - printk(KERN_WARNING - "kvm: called SAL_MC_RENDEZ. ignored...\n"); - break; - case SAL_MC_SET_PARAMS: - printk(KERN_WARNING - "kvm: called SAL_MC_SET_PARAMS.ignored!\n"); - break; - case SAL_CACHE_FLUSH: - if (1) { - /*Flush using SAL. - This method is faster but has a side - effect on other vcpu running on - this cpu. */ - status = ia64_sal_cache_flush(in1); - } else { - /*Maybe need to implement the method - without side effect!*/ - status = 0; - } - break; - case SAL_CACHE_INIT: - printk(KERN_WARNING - "kvm: called SAL_CACHE_INIT. ignored...\n"); - break; - case SAL_UPDATE_PAL: - printk(KERN_WARNING - "kvm: CALLED SAL_UPDATE_PAL. ignored...\n"); - break; - default: - printk(KERN_WARNING"kvm: called SAL_CALL with unknown index." - " index:%ld\n", index); - status = -1; - break; - } - return ((struct sal_ret_values) {status, r9, r10, r11}); -} - -static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1, - u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){ - - struct exit_ctl_data *p; - - p = kvm_get_exit_data(vcpu); - - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - *in0 = p->u.sal_data.in0; - *in1 = p->u.sal_data.in1; - *in2 = p->u.sal_data.in2; - *in3 = p->u.sal_data.in3; - *in4 = p->u.sal_data.in4; - *in5 = p->u.sal_data.in5; - *in6 = p->u.sal_data.in6; - *in7 = p->u.sal_data.in7; - return ; - } - *in0 = 0; -} - -void kvm_sal_emul(struct kvm_vcpu *vcpu) -{ - - struct sal_ret_values result; - u64 index, in1, in2, in3, in4, in5, in6, in7; - - kvm_get_sal_call_data(vcpu, &index, &in1, &in2, - &in3, &in4, &in5, &in6, &in7); - result = sal_emulator(vcpu->kvm, index, in1, in2, in3, - in4, in5, in6, in7); - set_sal_result(vcpu, result); -} diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c deleted file mode 100644 index f1268b8e6f9e..000000000000 --- a/arch/ia64/kvm/kvm_lib.c +++ /dev/null @@ -1,21 +0,0 @@ -/* - * kvm_lib.c: Compile some libraries for kvm-intel module. - * - * Just include kernel's library, and disable symbols export. - * Copyright (C) 2008, Intel Corporation. - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ -#undef CONFIG_MODULES -#include -#undef CONFIG_KALLSYMS -#undef EXPORT_SYMBOL -#undef EXPORT_SYMBOL_GPL -#define EXPORT_SYMBOL(sym) -#define EXPORT_SYMBOL_GPL(sym) -#include "../../../lib/vsprintf.c" -#include "../../../lib/ctype.c" diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h deleted file mode 100644 index b2bcaa2787aa..000000000000 --- a/arch/ia64/kvm/kvm_minstate.h +++ /dev/null @@ -1,266 +0,0 @@ -/* - * kvm_minstate.h: min save macros - * Copyright (c) 2007, Intel Corporation. - * - * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - - -#include -#include -#include -#include - -#include "asm-offsets.h" - -#define KVM_MINSTATE_START_SAVE_MIN \ - mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\ - ;; \ - mov.m r28 = ar.rnat; \ - addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \ - ;; \ - lfetch.fault.excl.nt1 [r22]; \ - addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \ - mov r23 = ar.bspstore; /* save ar.bspstore */ \ - ;; \ - mov ar.bspstore = r22; /* switch to kernel RBS */\ - ;; \ - mov r18 = ar.bsp; \ - mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ - - - -#define KVM_MINSTATE_END_SAVE_MIN \ - bsw.1; /* switch back to bank 1 (must be last in insn group) */\ - ;; - - -#define PAL_VSA_SYNC_READ \ - /* begin to call pal vps sync_read */ \ -{.mii; \ - add r25 = VMM_VPD_BASE_OFFSET, r21; \ - nop 0x0; \ - mov r24=ip; \ - ;; \ -} \ -{.mmb \ - add r24=0x20, r24; \ - ld8 r25 = [r25]; /* read vpd base */ \ - br.cond.sptk kvm_vps_sync_read; /*call the service*/ \ - ;; \ -}; \ - - -#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21 - -/* - * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves - * the minimum state necessary that allows us to turn psr.ic back - * on. - * - * Assumed state upon entry: - * psr.ic: off - * r31: contains saved predicates (pr) - * - * Upon exit, the state is as follows: - * psr.ic: off - * r2 = points to &pt_regs.r16 - * r8 = contents of ar.ccv - * r9 = contents of ar.csd - * r10 = contents of ar.ssd - * r11 = FPSR_DEFAULT - * r12 = kernel sp (kernel virtual address) - * r13 = points to current task_struct (kernel virtual address) - * p15 = TRUE if psr.i is set in cr.ipsr - * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: - * preserved - * - * Note that psr.ic is NOT turned on by this macro. This is so that - * we can pass interruption state as arguments to a handler. 
- */ - - -#define PT(f) (VMM_PT_REGS_##f##_OFFSET) - -#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ - KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ - mov r27 = ar.rsc; /* M */ \ - mov r20 = r1; /* A */ \ - mov r25 = ar.unat; /* M */ \ - mov r29 = cr.ipsr; /* M */ \ - mov r26 = ar.pfs; /* I */ \ - mov r18 = cr.isr; \ - COVER; /* B;; (or nothing) */ \ - ;; \ - tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \ - mov r1 = r16; \ -/* mov r21=r16; */ \ - /* switch from user to kernel RBS: */ \ - ;; \ - invala; /* M */ \ - SAVE_IFS; \ - ;; \ - KVM_MINSTATE_START_SAVE_MIN \ - adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \ - adds r16 = PT(CR_IPSR),r1; \ - ;; \ - lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ - st8 [r16] = r29; /* save cr.ipsr */ \ - ;; \ - lfetch.fault.excl.nt1 [r17]; \ - tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \ - mov r29 = b0 \ - ;; \ - adds r16 = PT(R8),r1; /* initialize first base pointer */\ - adds r17 = PT(R9),r1; /* initialize second base pointer */\ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r8,16; \ -.mem.offset 8,0; st8.spill [r17] = r9,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r10,24; \ -.mem.offset 8,0; st8.spill [r17] = r11,24; \ - ;; \ - mov r9 = cr.iip; /* M */ \ - mov r10 = ar.fpsr; /* M */ \ - ;; \ - st8 [r16] = r9,16; /* save cr.iip */ \ - st8 [r17] = r30,16; /* save cr.ifs */ \ - sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \ - ;; \ - st8 [r16] = r25,16; /* save ar.unat */ \ - st8 [r17] = r26,16; /* save ar.pfs */ \ - shl r18 = r18,16; /* calu ar.rsc used for "loadrs" */\ - ;; \ - st8 [r16] = r27,16; /* save ar.rsc */ \ - st8 [r17] = r28,16; /* save ar.rnat */ \ - ;; /* avoid RAW on r16 & r17 */ \ - st8 [r16] = r23,16; /* save ar.bspstore */ \ - st8 [r17] = r31,16; /* save predicates */ \ - ;; \ - st8 [r16] = r29,16; /* save b0 */ \ - st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \ -.mem.offset 8,0; st8.spill [r17] = r12,16; \ - adds r12 = -16,r1; /* switch to kernel memory stack */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r13,16; \ -.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\ - mov r13 = r21; /* establish `current' */ \ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r15,16; \ -.mem.offset 8,0; st8.spill [r17] = r14,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r16] = r2,16; \ -.mem.offset 8,0; st8.spill [r17] = r3,16; \ - adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \ - ;; \ - adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \ - adds r17 = VMM_VCPU_ISR_OFFSET,r13; \ - mov r26 = cr.iipa; \ - mov r27 = cr.isr; \ - ;; \ - st8 [r16] = r26; \ - st8 [r17] = r27; \ - ;; \ - EXTRA; \ - mov r8 = ar.ccv; \ - mov r9 = ar.csd; \ - mov r10 = ar.ssd; \ - movl r11 = FPSR_DEFAULT; /* L-unit */ \ - adds r17 = VMM_VCPU_GP_OFFSET,r13; \ - ;; \ - ld8 r1 = [r17];/* establish kernel global pointer */ \ - ;; \ - PAL_VSA_SYNC_READ \ - KVM_MINSTATE_END_SAVE_MIN - -/* - * SAVE_REST saves the remainder of pt_regs (with psr.ic on). - * - * Assumed state upon entry: - * psr.ic: on - * r2: points to &pt_regs.f6 - * r3: points to &pt_regs.f7 - * r8: contents of ar.ccv - * r9: contents of ar.csd - * r10: contents of ar.ssd - * r11: FPSR_DEFAULT - * - * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST. 
- */ -#define KVM_SAVE_REST \ -.mem.offset 0,0; st8.spill [r2] = r16,16; \ -.mem.offset 8,0; st8.spill [r3] = r17,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r18,16; \ -.mem.offset 8,0; st8.spill [r3] = r19,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r20,16; \ -.mem.offset 8,0; st8.spill [r3] = r21,16; \ - mov r18=b6; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r22,16; \ -.mem.offset 8,0; st8.spill [r3] = r23,16; \ - mov r19 = b7; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r24,16; \ -.mem.offset 8,0; st8.spill [r3] = r25,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r26,16; \ -.mem.offset 8,0; st8.spill [r3] = r27,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r28,16; \ -.mem.offset 8,0; st8.spill [r3] = r29,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r30,16; \ -.mem.offset 8,0; st8.spill [r3] = r31,32; \ - ;; \ - mov ar.fpsr = r11; \ - st8 [r2] = r8,8; \ - adds r24 = PT(B6)-PT(F7),r3; \ - adds r25 = PT(B7)-PT(F7),r3; \ - ;; \ - st8 [r24] = r18,16; /* b6 */ \ - st8 [r25] = r19,16; /* b7 */ \ - adds r2 = PT(R4)-PT(F6),r2; \ - adds r3 = PT(R5)-PT(F7),r3; \ - ;; \ - st8 [r24] = r9; /* ar.csd */ \ - st8 [r25] = r10; /* ar.ssd */ \ - ;; \ - mov r18 = ar.unat; \ - adds r19 = PT(EML_UNAT)-PT(R4),r2; \ - ;; \ - st8 [r19] = r18; /* eml_unat */ \ - - -#define KVM_SAVE_EXTRA \ -.mem.offset 0,0; st8.spill [r2] = r4,16; \ -.mem.offset 8,0; st8.spill [r3] = r5,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2] = r6,16; \ -.mem.offset 8,0; st8.spill [r3] = r7; \ - ;; \ - mov r26 = ar.unat; \ - ;; \ - st8 [r2] = r26;/* eml_unat */ \ - -#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,) -#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19) -#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, ) diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h deleted file mode 100644 index c5f92a926a9a..000000000000 --- a/arch/ia64/kvm/lapic.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef __KVM_IA64_LAPIC_H -#define __KVM_IA64_LAPIC_H - -#include - -/* - * vlsapic - */ -struct kvm_lapic{ - struct kvm_vcpu *vcpu; - uint64_t insvc[4]; - uint64_t vhpi; - uint8_t xtp; - uint8_t pal_init_pending; - uint8_t pad[2]; -}; - -int kvm_create_lapic(struct kvm_vcpu *vcpu); -void kvm_free_lapic(struct kvm_vcpu *vcpu); - -int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest); -int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda); -int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, - int short_hand, int dest, int dest_mode); -int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); -int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq); -#define kvm_apic_present(x) (true) -#define kvm_lapic_enabled(x) (true) - -#endif diff --git a/arch/ia64/kvm/memcpy.S b/arch/ia64/kvm/memcpy.S deleted file mode 100644 index c04cdbe9f80f..000000000000 --- a/arch/ia64/kvm/memcpy.S +++ /dev/null @@ -1 +0,0 @@ -#include "../lib/memcpy.S" diff --git a/arch/ia64/kvm/memset.S b/arch/ia64/kvm/memset.S deleted file mode 100644 index 83c3066d844a..000000000000 --- a/arch/ia64/kvm/memset.S +++ /dev/null @@ -1 +0,0 @@ -#include "../lib/memset.S" diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h deleted file mode 100644 index dd979e00b574..000000000000 --- a/arch/ia64/kvm/misc.h +++ /dev/null @@ -1,94 +0,0 @@ -#ifndef __KVM_IA64_MISC_H -#define __KVM_IA64_MISC_H - -#include -/* - * misc.h - * Copyright (C) 2007, Intel Corporation. 
- * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -/* - *Return p2m base address at host side! - */ -static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) -{ - return (uint64_t *)(kvm->arch.vm_base + - offsetof(struct kvm_vm_data, kvm_p2m)); -} - -static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn, - u64 paddr, u64 mem_flags) -{ - uint64_t *pmt_base = kvm_host_get_pmt(kvm); - unsigned long pte; - - pte = PAGE_ALIGN(paddr) | mem_flags; - pmt_base[gfn] = pte; -} - -/*Function for translating host address to guest address*/ - -static inline void *to_guest(struct kvm *kvm, void *addr) -{ - return (void *)((unsigned long)(addr) - kvm->arch.vm_base + - KVM_VM_DATA_BASE); -} - -/*Function for translating guest address to host address*/ - -static inline void *to_host(struct kvm *kvm, void *addr) -{ - return (void *)((unsigned long)addr - KVM_VM_DATA_BASE - + kvm->arch.vm_base); -} - -/* Get host context of the vcpu */ -static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu) -{ - union context *ctx = &vcpu->arch.host; - return to_guest(vcpu->kvm, ctx); -} - -/* Get guest context of the vcpu */ -static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu) -{ - union context *ctx = &vcpu->arch.guest; - return to_guest(vcpu->kvm, ctx); -} - -/* kvm get exit data from gvmm! */ -static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu) -{ - return &vcpu->arch.exit_data; -} - -/*kvm get vcpu ioreq for kvm module!*/ -static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p_ctl_data; - - if (vcpu) { - p_ctl_data = kvm_get_exit_data(vcpu); - if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION) - return &p_ctl_data->u.ioreq; - } - - return NULL; -} - -#endif diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c deleted file mode 100644 index f1e17d3d6cd9..000000000000 --- a/arch/ia64/kvm/mmio.c +++ /dev/null @@ -1,336 +0,0 @@ -/* - * mmio.c: MMIO emulation components. - * Copyright (c) 2004, Intel Corporation. - * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) - * Kun Tian (Kevin Tian) (Kevin.tian@intel.com) - * - * Copyright (c) 2007 Intel Corporation KVM support. - * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#include - -#include "vcpu.h" - -static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val) -{ - VLSAPIC_XTP(v) = val; -} - -/* - * LSAPIC OFFSET - */ -#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20)) -#define PIB_OFST_INTA 0x1E0000 -#define PIB_OFST_XTP 0x1E0008 - -/* - * execute write IPI op. - */ -static void vlsapic_write_ipi(struct kvm_vcpu *vcpu, - uint64_t addr, uint64_t data) -{ - struct exit_ctl_data *p = &current_vcpu->arch.exit_data; - unsigned long psr; - - local_irq_save(psr); - - p->exit_reason = EXIT_REASON_IPI; - p->u.ipi_data.addr.val = addr; - p->u.ipi_data.data.val = data; - vmm_transition(current_vcpu); - - local_irq_restore(psr); - -} - -void lsapic_write(struct kvm_vcpu *v, unsigned long addr, - unsigned long length, unsigned long val) -{ - addr &= (PIB_SIZE - 1); - - switch (addr) { - case PIB_OFST_INTA: - panic_vm(v, "Undefined write on PIB INTA\n"); - break; - case PIB_OFST_XTP: - if (length == 1) { - vlsapic_write_xtp(v, val); - } else { - panic_vm(v, "Undefined write on PIB XTP\n"); - } - break; - default: - if (PIB_LOW_HALF(addr)) { - /*Lower half */ - if (length != 8) - panic_vm(v, "Can't LHF write with size %ld!\n", - length); - else - vlsapic_write_ipi(v, addr, val); - } else { /*Upper half */ - panic_vm(v, "IPI-UHF write %lx\n", addr); - } - break; - } -} - -unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr, - unsigned long length) -{ - uint64_t result = 0; - - addr &= (PIB_SIZE - 1); - - switch (addr) { - case PIB_OFST_INTA: - if (length == 1) /* 1 byte load */ - ; /* There is no i8259, there is no INTA access*/ - else - panic_vm(v, "Undefined read on PIB INTA\n"); - - break; - case PIB_OFST_XTP: - if (length == 1) { - result = VLSAPIC_XTP(v); - } else { - panic_vm(v, "Undefined read on PIB XTP\n"); - } - break; - default: - panic_vm(v, "Undefined addr access for lsapic!\n"); - break; - } - return result; -} - -static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, - u16 s, int ma, int dir) -{ - unsigned long iot; - struct exit_ctl_data *p = &vcpu->arch.exit_data; - unsigned long psr; - - iot = __gpfn_is_io(src_pa >> PAGE_SHIFT); - - local_irq_save(psr); - - /*Intercept the access for PIB range*/ - if (iot == GPFN_PIB) { - if (!dir) - lsapic_write(vcpu, src_pa, s, *dest); - else - *dest = lsapic_read(vcpu, src_pa, s); - goto out; - } - p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION; - p->u.ioreq.addr = src_pa; - p->u.ioreq.size = s; - p->u.ioreq.dir = dir; - if (dir == IOREQ_WRITE) - p->u.ioreq.data = *dest; - p->u.ioreq.state = STATE_IOREQ_READY; - vmm_transition(vcpu); - - if (p->u.ioreq.state == STATE_IORESP_READY) { - if (dir == IOREQ_READ) - /* it's necessary to ensure zero extending */ - *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); - } else - panic_vm(vcpu, "Unhandled mmio access returned!\n"); -out: - local_irq_restore(psr); - return ; -} - -/* - dir 1: read 0:write - inst_type 0:integer 1:floating point - */ -#define SL_INTEGER 0 /* store/load interger*/ -#define SL_FLOATING 1 /* store/load floating*/ - -void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) -{ - struct kvm_pt_regs *regs; - IA64_BUNDLE bundle; - int slot, dir = 0; - int inst_type = -1; - u16 size = 0; - u64 data, slot1a, slot1b, temp, update_reg; - s32 imm; - INST64 inst; - - regs = vcpu_regs(vcpu); - - if (fetch_code(vcpu,
regs->cr_iip, &bundle)) { - /* if fetch code fail, return and try again */ - return; - } - slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri; - if (!slot) - inst.inst = bundle.slot0; - else if (slot == 1) { - slot1a = bundle.slot1a; - slot1b = bundle.slot1b; - inst.inst = slot1a + (slot1b << 18); - } else if (slot == 2) - inst.inst = bundle.slot2; - - /* Integer Load/Store */ - if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) { - inst_type = SL_INTEGER; - size = (inst.M1.x6 & 0x3); - if ((inst.M1.x6 >> 2) > 0xb) { - /*write*/ - dir = IOREQ_WRITE; - data = vcpu_get_gr(vcpu, inst.M4.r2); - } else if ((inst.M1.x6 >> 2) < 0xb) { - /*read*/ - dir = IOREQ_READ; - } - } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) { - /* Integer Load + Reg update */ - inst_type = SL_INTEGER; - dir = IOREQ_READ; - size = (inst.M2.x6 & 0x3); - temp = vcpu_get_gr(vcpu, inst.M2.r3); - update_reg = vcpu_get_gr(vcpu, inst.M2.r2); - temp += update_reg; - vcpu_set_gr(vcpu, inst.M2.r3, temp, 0); - } else if (inst.M3.major == 5) { - /*Integer Load/Store + Imm update*/ - inst_type = SL_INTEGER; - size = (inst.M3.x6&0x3); - if ((inst.M5.x6 >> 2) > 0xb) { - /*write*/ - dir = IOREQ_WRITE; - data = vcpu_get_gr(vcpu, inst.M5.r2); - temp = vcpu_get_gr(vcpu, inst.M5.r3); - imm = (inst.M5.s << 31) | (inst.M5.i << 30) | - (inst.M5.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M5.r3, temp, 0); - - } else if ((inst.M3.x6 >> 2) < 0xb) { - /*read*/ - dir = IOREQ_READ; - temp = vcpu_get_gr(vcpu, inst.M3.r3); - imm = (inst.M3.s << 31) | (inst.M3.i << 30) | - (inst.M3.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M3.r3, temp, 0); - - } - } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B - && inst.M9.m == 0 && inst.M9.x == 0) { - /* Floating-point spill*/ - struct ia64_fpreg v; - - inst_type = SL_FLOATING; - dir = IOREQ_WRITE; - vcpu_get_fpreg(vcpu, inst.M9.f2, &v); - /* Write high word. FIXME: this is a kludge! */ - v.u.bits[1] &= 0x3ffff; - mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8, - ma, IOREQ_WRITE); - data = v.u.bits[0]; - size = 3; - } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) { - /* Floating-point spill + Imm update */ - struct ia64_fpreg v; - - inst_type = SL_FLOATING; - dir = IOREQ_WRITE; - vcpu_get_fpreg(vcpu, inst.M10.f2, &v); - temp = vcpu_get_gr(vcpu, inst.M10.r3); - imm = (inst.M10.s << 31) | (inst.M10.i << 30) | - (inst.M10.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M10.r3, temp, 0); - - /* Write high word.FIXME: this is a kludge! */ - v.u.bits[1] &= 0x3ffff; - mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], - 8, ma, IOREQ_WRITE); - data = v.u.bits[0]; - size = 3; - } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) { - /* Floating-point stf8 + Imm update */ - struct ia64_fpreg v; - inst_type = SL_FLOATING; - dir = IOREQ_WRITE; - size = 3; - vcpu_get_fpreg(vcpu, inst.M10.f2, &v); - data = v.u.bits[0]; /* Significand. 
*/ - temp = vcpu_get_gr(vcpu, inst.M10.r3); - imm = (inst.M10.s << 31) | (inst.M10.i << 30) | - (inst.M10.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M10.r3, temp, 0); - } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c - && inst.M15.x6 <= 0x2f) { - temp = vcpu_get_gr(vcpu, inst.M15.r3); - imm = (inst.M15.s << 31) | (inst.M15.i << 30) | - (inst.M15.imm7 << 23); - temp += imm >> 23; - vcpu_set_gr(vcpu, inst.M15.r3, temp, 0); - - vcpu_increment_iip(vcpu); - return; - } else if (inst.M12.major == 6 && inst.M12.m == 1 - && inst.M12.x == 1 && inst.M12.x6 == 1) { - /* Floating-point Load Pair + Imm ldfp8 M12*/ - struct ia64_fpreg v; - - inst_type = SL_FLOATING; - dir = IOREQ_READ; - size = 8; /*ldfd*/ - mmio_access(vcpu, padr, &data, size, ma, dir); - v.u.bits[0] = data; - v.u.bits[1] = 0x1003E; - vcpu_set_fpreg(vcpu, inst.M12.f1, &v); - padr += 8; - mmio_access(vcpu, padr, &data, size, ma, dir); - v.u.bits[0] = data; - v.u.bits[1] = 0x1003E; - vcpu_set_fpreg(vcpu, inst.M12.f2, &v); - padr += 8; - vcpu_set_gr(vcpu, inst.M12.r3, padr, 0); - vcpu_increment_iip(vcpu); - return; - } else { - inst_type = -1; - panic_vm(vcpu, "Unsupported MMIO access instruction! " - "Bunld[0]=0x%lx, Bundle[1]=0x%lx\n", - bundle.i64[0], bundle.i64[1]); - } - - size = 1 << size; - if (dir == IOREQ_WRITE) { - mmio_access(vcpu, padr, &data, size, ma, dir); - } else { - mmio_access(vcpu, padr, &data, size, ma, dir); - if (inst_type == SL_INTEGER) - vcpu_set_gr(vcpu, inst.M1.r1, data, 0); - else - panic_vm(vcpu, "Unsupported instruction type!\n"); - - } - vcpu_increment_iip(vcpu); -} diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S deleted file mode 100644 index f793be3effff..000000000000 --- a/arch/ia64/kvm/optvfault.S +++ /dev/null @@ -1,1090 +0,0 @@ -/* - * arch/ia64/kvm/optvfault.S - * optimize virtualization fault handler - * - * Copyright (C) 2006 Intel Co - * Xuefei Xu (Anthony Xu) - * Copyright (C) 2008 Intel Co - * Add the support for Tukwila processors. 
- * Xiantao Zhang - */ - -#include -#include -#include - -#include "vti.h" -#include "asm-offsets.h" - -#define ACCE_MOV_FROM_AR -#define ACCE_MOV_FROM_RR -#define ACCE_MOV_TO_RR -#define ACCE_RSM -#define ACCE_SSM -#define ACCE_MOV_TO_PSR -#define ACCE_THASH - -#define VMX_VPS_SYNC_READ \ - add r16=VMM_VPD_BASE_OFFSET,r21; \ - mov r17 = b0; \ - mov r18 = r24; \ - mov r19 = r25; \ - mov r20 = r31; \ - ;; \ -{.mii; \ - ld8 r16 = [r16]; \ - nop 0x0; \ - mov r24 = ip; \ - ;; \ -}; \ -{.mmb; \ - add r24=0x20, r24; \ - mov r25 =r16; \ - br.sptk.many kvm_vps_sync_read; \ -}; \ - mov b0 = r17; \ - mov r24 = r18; \ - mov r25 = r19; \ - mov r31 = r20 - -ENTRY(kvm_vps_entry) - adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21 - ;; - ld8 r29 = [r29] - ;; - add r29 = r29, r30 - ;; - mov b0 = r29 - br.sptk.many b0 -END(kvm_vps_entry) - -/* - * Inputs: - * r24 : return address - * r25 : vpd - * r29 : scratch - * - */ -GLOBAL_ENTRY(kvm_vps_sync_read) - movl r30 = PAL_VPS_SYNC_READ - ;; - br.sptk.many kvm_vps_entry -END(kvm_vps_sync_read) - -/* - * Inputs: - * r24 : return address - * r25 : vpd - * r29 : scratch - * - */ -GLOBAL_ENTRY(kvm_vps_sync_write) - movl r30 = PAL_VPS_SYNC_WRITE - ;; - br.sptk.many kvm_vps_entry -END(kvm_vps_sync_write) - -/* - * Inputs: - * r23 : pr - * r24 : guest b0 - * r25 : vpd - * - */ -GLOBAL_ENTRY(kvm_vps_resume_normal) - movl r30 = PAL_VPS_RESUME_NORMAL - ;; - mov pr=r23,-2 - br.sptk.many kvm_vps_entry -END(kvm_vps_resume_normal) - -/* - * Inputs: - * r23 : pr - * r24 : guest b0 - * r25 : vpd - * r17 : isr - */ -GLOBAL_ENTRY(kvm_vps_resume_handler) - movl r30 = PAL_VPS_RESUME_HANDLER - ;; - ld8 r26=[r25] - shr r17=r17,IA64_ISR_IR_BIT - ;; - dep r26=r17,r26,63,1 // bit 63 of r26 indicate whether enable CFLE - mov pr=r23,-2 - br.sptk.many kvm_vps_entry -END(kvm_vps_resume_handler) - -//mov r1=ar3 -GLOBAL_ENTRY(kvm_asm_mov_from_ar) -#ifndef ACCE_MOV_FROM_AR - br.many kvm_virtualization_fault_back -#endif - add r18=VMM_VCPU_ITC_OFS_OFFSET, r21 - add r16=VMM_VCPU_LAST_ITC_OFFSET,r21 - extr.u r17=r25,6,7 - ;; - ld8 r18=[r18] - mov r19=ar.itc - mov r24=b0 - ;; - add r19=r19,r18 - addl r20=@gprel(asm_mov_to_reg),gp - ;; - st8 [r16] = r19 - adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20 - shladd r17=r17,4,r20 - ;; - mov b0=r17 - br.sptk.few b0 - ;; -END(kvm_asm_mov_from_ar) - -/* - * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC - * clock as it's source for emulating the ITC. This version will be - * copied on top of the original version if the host is determined to - * be an SN2. 
- */ -GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2) - add r18=VMM_VCPU_ITC_OFS_OFFSET, r21 - movl r19 = (KVM_VMM_BASE+(1<arch.vrr[0]'s addr - adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta - ;; - shladd r27=r23,3,r28 // get vcpu->arch.vrr[r23]'s addr - ld8 r17=[r16] // get PTA - mov r26=1 - ;; - extr.u r29=r17,2,6 // get pta.size - ld8 r28=[r27] // get vcpu->arch.vrr[r23]'s value - ;; - mov b0=r24 - //Fallback to C if pta.vf is set - tbit.nz p6,p0=r17, 8 - ;; - (p6) mov r24=EVENT_THASH - (p6) br.cond.dpnt.many kvm_virtualization_fault_back - extr.u r28=r28,2,6 // get rr.ps - shl r22=r26,r29 // 1UL << pta.size - ;; - shr.u r23=r19,r28 // vaddr >> rr.ps - adds r26=3,r29 // pta.size + 3 - shl r27=r17,3 // pta << 3 - ;; - shl r23=r23,3 // (vaddr >> rr.ps) << 3 - shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3) - movl r16=7<<61 - ;; - adds r22=-1,r22 // (1UL << pta.size) - 1 - shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<>(pta.size + 3))< - * Xiaoyan Feng (Fleming Feng) - * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) - * Xiantao Zhang (xiantao.zhang@intel.com) - */ -#include "vcpu.h" - -#include -#include -#include -#include -#include - -fpswa_interface_t *vmm_fpswa_interface; - -#define IA64_VHPT_TRANS_VECTOR 0x0000 -#define IA64_INST_TLB_VECTOR 0x0400 -#define IA64_DATA_TLB_VECTOR 0x0800 -#define IA64_ALT_INST_TLB_VECTOR 0x0c00 -#define IA64_ALT_DATA_TLB_VECTOR 0x1000 -#define IA64_DATA_NESTED_TLB_VECTOR 0x1400 -#define IA64_INST_KEY_MISS_VECTOR 0x1800 -#define IA64_DATA_KEY_MISS_VECTOR 0x1c00 -#define IA64_DIRTY_BIT_VECTOR 0x2000 -#define IA64_INST_ACCESS_BIT_VECTOR 0x2400 -#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800 -#define IA64_BREAK_VECTOR 0x2c00 -#define IA64_EXTINT_VECTOR 0x3000 -#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000 -#define IA64_KEY_PERMISSION_VECTOR 0x5100 -#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200 -#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300 -#define IA64_GENEX_VECTOR 0x5400 -#define IA64_DISABLED_FPREG_VECTOR 0x5500 -#define IA64_NAT_CONSUMPTION_VECTOR 0x5600 -#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */ -#define IA64_DEBUG_VECTOR 0x5900 -#define IA64_UNALIGNED_REF_VECTOR 0x5a00 -#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00 -#define IA64_FP_FAULT_VECTOR 0x5c00 -#define IA64_FP_TRAP_VECTOR 0x5d00 -#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00 -#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00 -#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000 - -/* SDM vol2 5.5 - IVA based interruption handling */ -#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\ - IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \ - IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT) - -#define DOMN_PAL_REQUEST 0x110000 -#define DOMN_SAL_REQUEST 0x110001 - -static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800, - 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00, - 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, - 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00, - 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600, - 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00, - 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800, - 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00 -}; - -static void collect_interruption(struct kvm_vcpu *vcpu) -{ - u64 ipsr; - u64 vdcr; - u64 vifs; - unsigned long vpsr; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - vpsr = vcpu_get_psr(vcpu); - vcpu_bsw0(vcpu); - if (vpsr & IA64_PSR_IC) { - - /* Sync mpsr id/da/dd/ss/ed bits 
to vipsr - * since after guest do rfi, we still want these bits on in - * mpsr - */ - - ipsr = regs->cr_ipsr; - vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA - | IA64_PSR_DD | IA64_PSR_SS - | IA64_PSR_ED)); - vcpu_set_ipsr(vcpu, vpsr); - - /* Currently, for trap, we do not advance IIP to next - * instruction. That's because we assume caller already - * set up IIP correctly - */ - - vcpu_set_iip(vcpu , regs->cr_iip); - - /* set vifs.v to zero */ - vifs = VCPU(vcpu, ifs); - vifs &= ~IA64_IFS_V; - vcpu_set_ifs(vcpu, vifs); - - vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa)); - } - - vdcr = VCPU(vcpu, dcr); - - /* Set guest psr - * up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged - * be: set to the value of dcr.be - * pp: set to the value of dcr.pp - */ - vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION; - vpsr |= (vdcr & IA64_DCR_BE); - - /* VDCR pp bit position is different from VPSR pp bit */ - if (vdcr & IA64_DCR_PP) { - vpsr |= IA64_PSR_PP; - } else { - vpsr &= ~IA64_PSR_PP; - } - - vcpu_set_psr(vcpu, vpsr); - -} - -void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec) -{ - u64 viva; - struct kvm_pt_regs *regs; - union ia64_isr pt_isr; - - regs = vcpu_regs(vcpu); - - /* clear cr.isr.ir (incomplete register frame)*/ - pt_isr.val = VMX(vcpu, cr_isr); - pt_isr.ir = 0; - VMX(vcpu, cr_isr) = pt_isr.val; - - collect_interruption(vcpu); - - viva = vcpu_get_iva(vcpu); - regs->cr_iip = viva + vec; -} - -static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa) -{ - union ia64_rr rr, rr1; - - rr.val = vcpu_get_rr(vcpu, ifa); - rr1.val = 0; - rr1.ps = rr.ps; - rr1.rid = rr.rid; - return (rr1.val); -} - -/* - * Set vIFA & vITIR & vIHA, when vPSR.ic =1 - * Parameter: - * set_ifa: if true, set vIFA - * set_itir: if true, set vITIR - * set_iha: if true, set vIHA - */ -void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr, - int set_ifa, int set_itir, int set_iha) -{ - long vpsr; - u64 value; - - vpsr = VCPU(vcpu, vpsr); - /* Vol2, Table 8-1 */ - if (vpsr & IA64_PSR_IC) { - if (set_ifa) - vcpu_set_ifa(vcpu, vadr); - if (set_itir) { - value = vcpu_get_itir_on_fault(vcpu, vadr); - vcpu_set_itir(vcpu, value); - } - - if (set_iha) { - value = vcpu_thash(vcpu, vadr); - vcpu_set_iha(vcpu, value); - } - } -} - -/* - * Data TLB Fault - * @ Data TLB vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR, IHA */ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); - inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR); -} - -/* - * Instruction TLB Fault - * @ Instruction TLB vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR, IHA */ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); - inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR); -} - -/* - * Data Nested TLB Fault - * @ Data Nested TLB Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void nested_dtlb(struct kvm_vcpu *vcpu) -{ - inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR); -} - -/* - * Alternate Data TLB Fault - * @ Alternate Data TLB vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr) -{ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); - inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR); -} - -/* - * Data TLB Fault - * @ Data TLB vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr) -{ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); - inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR); -} - -/* Deal with: - * VHPT 
Translation Vector - */ -static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR, IHA*/ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); - inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR); -} - -/* - * VHPT Instruction Fault - * @ VHPT Translation vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - _vhpt_fault(vcpu, vadr); -} - -/* - * VHPT Data Fault - * @ VHPT Translation vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) -{ - _vhpt_fault(vcpu, vadr); -} - -/* - * Deal with: - * General Exception vector - */ -void _general_exception(struct kvm_vcpu *vcpu) -{ - inject_guest_interruption(vcpu, IA64_GENEX_VECTOR); -} - -/* - * Illegal Operation Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void illegal_op(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* - * Illegal Dependency Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void illegal_dep(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* - * Reserved Register/Field Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void rsv_reg_field(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} -/* - * Privileged Operation Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ - -void privilege_op(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* - * Unimplement Data Address Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void unimpl_daddr(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* - * Privileged Register Fault - * @ General Exception Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void privilege_reg(struct kvm_vcpu *vcpu) -{ - _general_exception(vcpu); -} - -/* Deal with - * Nat consumption vector - * Parameter: - * vaddr: Optional, if t == REGISTER - */ -static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr, - enum tlb_miss_type t) -{ - /* If vPSR.ic && t == DATA/INST, IFA */ - if (t == DATA || t == INSTRUCTION) { - /* IFA */ - set_ifa_itir_iha(vcpu, vadr, 1, 0, 0); - } - - inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR); -} - -/* - * Instruction Nat Page Consumption Fault - * @ Nat Consumption Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr) -{ - _nat_consumption_fault(vcpu, vadr, INSTRUCTION); -} - -/* - * Register Nat Consumption Fault - * @ Nat Consumption Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void rnat_consumption(struct kvm_vcpu *vcpu) -{ - _nat_consumption_fault(vcpu, 0, REGISTER); -} - -/* - * Data Nat Page Consumption Fault - * @ Nat Consumption Vector - * Refer to SDM Vol2 Table 5-6 & 8-1 - */ -void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr) -{ - _nat_consumption_fault(vcpu, vadr, DATA); -} - -/* Deal with - * Page not present vector - */ -static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, ITIR */ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); - inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); -} - -void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) -{ - __page_not_present(vcpu, vadr); -} - -void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) -{ - __page_not_present(vcpu, vadr); -} - -/* Deal with - * Data access rights vector - */ -void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr) -{ - /* If vPSR.ic, IFA, 
ITIR */ - set_ifa_itir_iha(vcpu, vadr, 1, 1, 0); - inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR); -} - -fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr, - unsigned long *fpsr, unsigned long *isr, unsigned long *pr, - unsigned long *ifs, struct kvm_pt_regs *regs) -{ - fp_state_t fp_state; - fpswa_ret_t ret; - struct kvm_vcpu *vcpu = current_vcpu; - - uint64_t old_rr7 = ia64_get_rr(7UL<<61); - - if (!vmm_fpswa_interface) - return (fpswa_ret_t) {-1, 0, 0, 0}; - - memset(&fp_state, 0, sizeof(fp_state_t)); - - /* - * compute fp_state. only FP registers f6 - f11 are used by the - * vmm, so set those bits in the mask and set the low volatile - * pointer to point to these registers. - */ - fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */ - - fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6; - - /* - * unsigned long (*EFI_FPSWA) ( - * unsigned long trap_type, - * void *Bundle, - * unsigned long *pipsr, - * unsigned long *pfsr, - * unsigned long *pisr, - * unsigned long *ppreds, - * unsigned long *pifs, - * void *fp_state); - */ - /*Call host fpswa interface directly to virtualize - *guest fpswa request! - */ - ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]); - ia64_srlz_d(); - - ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle, - ipsr, fpsr, isr, pr, ifs, &fp_state); - ia64_set_rr(7UL << 61, old_rr7); - ia64_srlz_d(); - return ret; -} - -/* - * Handle floating-point assist faults and traps for domain. - */ -unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs, - unsigned long isr) -{ - struct kvm_vcpu *v = current_vcpu; - IA64_BUNDLE bundle; - unsigned long fault_ip; - fpswa_ret_t ret; - - fault_ip = regs->cr_iip; - /* - * When the FP trap occurs, the trapping instruction is completed. - * If ipsr.ri == 0, there is the trapping instruction in previous - * bundle.
- */ - if (!fp_fault && (ia64_psr(regs)->ri == 0)) - fault_ip -= 16; - - if (fetch_code(v, fault_ip, &bundle)) - return -EAGAIN; - - if (!bundle.i64[0] && !bundle.i64[1]) - return -EACCES; - - ret = vmm_fp_emulate(fp_fault, &bundle, ®s->cr_ipsr, ®s->ar_fpsr, - &isr, ®s->pr, ®s->cr_ifs, regs); - return ret.status; -} - -void reflect_interruption(u64 ifa, u64 isr, u64 iim, - u64 vec, struct kvm_pt_regs *regs) -{ - u64 vector; - int status ; - struct kvm_vcpu *vcpu = current_vcpu; - u64 vpsr = VCPU(vcpu, vpsr); - - vector = vec2off[vec]; - - if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { - panic_vm(vcpu, "Interruption with vector :0x%lx occurs " - "with psr.ic = 0\n", vector); - return; - } - - switch (vec) { - case 32: /*IA64_FP_FAULT_VECTOR*/ - status = vmm_handle_fpu_swa(1, regs, isr); - if (!status) { - vcpu_increment_iip(vcpu); - return; - } else if (-EAGAIN == status) - return; - break; - case 33: /*IA64_FP_TRAP_VECTOR*/ - status = vmm_handle_fpu_swa(0, regs, isr); - if (!status) - return ; - break; - } - - VCPU(vcpu, isr) = isr; - VCPU(vcpu, iipa) = regs->cr_iip; - if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR) - VCPU(vcpu, iim) = iim; - else - set_ifa_itir_iha(vcpu, ifa, 1, 1, 1); - - inject_guest_interruption(vcpu, vector); -} - -static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu, - unsigned long arg) -{ - struct thash_data *data; - unsigned long gpa, poff; - - if (!is_physical_mode(vcpu)) { - /* Depends on caller to provide the DTR or DTC mapping.*/ - data = vtlb_lookup(vcpu, arg, D_TLB); - if (data) - gpa = data->page_flags & _PAGE_PPN_MASK; - else { - data = vhpt_lookup(arg); - if (!data) - return 0; - gpa = data->gpaddr & _PAGE_PPN_MASK; - } - - poff = arg & (PSIZE(data->ps) - 1); - arg = PAGEALIGN(gpa, data->ps) | poff; - } - arg = kvm_gpa_to_mpa(arg << 1 >> 1); - - return (unsigned long)__va(arg); -} - -static void set_pal_call_data(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - unsigned long gr28 = vcpu_get_gr(vcpu, 28); - unsigned long gr29 = vcpu_get_gr(vcpu, 29); - unsigned long gr30 = vcpu_get_gr(vcpu, 30); - - /*FIXME:For static and stacked convention, firmware - * has put the parameters in gr28-gr31 before - * break to vmm !!*/ - - switch (gr28) { - case PAL_PERF_MON_INFO: - case PAL_HALT_INFO: - p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29); - p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); - break; - case PAL_BRAND_INFO: - p->u.pal_data.gr29 = gr29; - p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30); - break; - default: - p->u.pal_data.gr29 = gr29; - p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); - } - p->u.pal_data.gr28 = gr28; - p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); - - p->exit_reason = EXIT_REASON_PAL_CALL; -} - -static void get_pal_call_result(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - - if (p->exit_reason == EXIT_REASON_PAL_CALL) { - vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0); - vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0); - vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); - vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); - } else - panic_vm(vcpu, "Mis-set for exit reason!\n"); -} - -static void set_sal_call_data(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - - p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32); - p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33); - p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34); - p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35); - p->u.sal_data.in4 = vcpu_get_gr(vcpu, 
36); - p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37); - p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38); - p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39); - p->exit_reason = EXIT_REASON_SAL_CALL; -} - -static void get_sal_call_result(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0); - vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0); - vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); - vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); - } else - panic_vm(vcpu, "Mis-set for exit reason!\n"); -} - -void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, - unsigned long isr, unsigned long iim) -{ - struct kvm_vcpu *v = current_vcpu; - long psr; - - if (ia64_psr(regs)->cpl == 0) { - /* Allow hypercalls only when cpl = 0. */ - if (iim == DOMN_PAL_REQUEST) { - local_irq_save(psr); - set_pal_call_data(v); - vmm_transition(v); - get_pal_call_result(v); - vcpu_increment_iip(v); - local_irq_restore(psr); - return; - } else if (iim == DOMN_SAL_REQUEST) { - local_irq_save(psr); - set_sal_call_data(v); - vmm_transition(v); - get_sal_call_result(v); - vcpu_increment_iip(v); - local_irq_restore(psr); - return; - } - } - reflect_interruption(ifa, isr, iim, 11, regs); -} - -void check_pending_irq(struct kvm_vcpu *vcpu) -{ - int mask, h_pending, h_inservice; - u64 isr; - unsigned long vpsr; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - h_pending = highest_pending_irq(vcpu); - if (h_pending == NULL_VECTOR) { - update_vhpi(vcpu, NULL_VECTOR); - return; - } - h_inservice = highest_inservice_irq(vcpu); - - vpsr = VCPU(vcpu, vpsr); - mask = irq_masked(vcpu, h_pending, h_inservice); - if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) { - isr = vpsr & IA64_PSR_RI; - update_vhpi(vcpu, h_pending); - reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ - } else if (mask == IRQ_MASKED_BY_INSVC) { - if (VCPU(vcpu, vhpi)) - update_vhpi(vcpu, NULL_VECTOR); - } else { - /* masked by vpsr.i or vtpr.*/ - update_vhpi(vcpu, h_pending); - } -} - -static void generate_exirq(struct kvm_vcpu *vcpu) -{ - unsigned vpsr; - uint64_t isr; - - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - vpsr = VCPU(vcpu, vpsr); - isr = vpsr & IA64_PSR_RI; - if (!(vpsr & IA64_PSR_IC)) - panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n"); - reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ -} - -void vhpi_detection(struct kvm_vcpu *vcpu) -{ - uint64_t threshold, vhpi; - union ia64_tpr vtpr; - struct ia64_psr vpsr; - - vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); - vtpr.val = VCPU(vcpu, tpr); - - threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic; - vhpi = VCPU(vcpu, vhpi); - if (vhpi > threshold) { - /* interrupt actived*/ - generate_exirq(vcpu); - } -} - -void leave_hypervisor_tail(void) -{ - struct kvm_vcpu *v = current_vcpu; - - if (VMX(v, timer_check)) { - VMX(v, timer_check) = 0; - if (VMX(v, itc_check)) { - if (vcpu_get_itc(v) > VCPU(v, itm)) { - if (!(VCPU(v, itv) & (1 << 16))) { - vcpu_pend_interrupt(v, VCPU(v, itv) - & 0xff); - VMX(v, itc_check) = 0; - } else { - v->arch.timer_pending = 1; - } - VMX(v, last_itc) = VCPU(v, itm) + 1; - } - } - } - - rmb(); - if (v->arch.irq_new_pending) { - v->arch.irq_new_pending = 0; - VMX(v, irq_check) = 0; - check_pending_irq(v); - return; - } - if (VMX(v, irq_check)) { - VMX(v, irq_check) = 0; - vhpi_detection(v); - } -} - -static inline void handle_lds(struct kvm_pt_regs *regs) -{ - regs->cr_ipsr |= IA64_PSR_ED; -} - -void physical_tlb_miss(struct 
kvm_vcpu *vcpu, unsigned long vadr, int type) -{ - unsigned long pte; - union ia64_rr rr; - - rr.val = ia64_get_rr(vadr); - pte = vadr & _PAGE_PPN_MASK; - pte = pte | PHY_PAGE_WB; - thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type); - return; -} - -void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs) -{ - unsigned long vpsr; - int type; - - u64 vhpt_adr, gppa, pteval, rr, itir; - union ia64_isr misr; - union ia64_pta vpta; - struct thash_data *data; - struct kvm_vcpu *v = current_vcpu; - - vpsr = VCPU(v, vpsr); - misr.val = VMX(v, cr_isr); - - type = vec; - - if (is_physical_mode(v) && (!(vadr << 1 >> 62))) { - if (vec == 2) { - if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) { - emulate_io_inst(v, ((vadr << 1) >> 1), 4); - return; - } - } - physical_tlb_miss(v, vadr, type); - return; - } - data = vtlb_lookup(v, vadr, type); - if (data != 0) { - if (type == D_TLB) { - gppa = (vadr & ((1UL << data->ps) - 1)) - + (data->ppn >> (data->ps - 12) << data->ps); - if (__gpfn_is_io(gppa >> PAGE_SHIFT)) { - if (data->pl >= ((regs->cr_ipsr >> - IA64_PSR_CPL0_BIT) & 3)) - emulate_io_inst(v, gppa, data->ma); - else { - vcpu_set_isr(v, misr.val); - data_access_rights(v, vadr); - } - return ; - } - } - thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type); - - } else if (type == D_TLB) { - if (misr.sp) { - handle_lds(regs); - return; - } - - rr = vcpu_get_rr(v, vadr); - itir = rr & (RR_RID_MASK | RR_PS_MASK); - - if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) { - if (vpsr & IA64_PSR_IC) { - vcpu_set_isr(v, misr.val); - alt_dtlb(v, vadr); - } else { - nested_dtlb(v); - } - return ; - } - - vpta.val = vcpu_get_pta(v); - /* avoid recursively walking (short format) VHPT */ - - vhpt_adr = vcpu_thash(v, vadr); - if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { - /* VHPT successfully read. */ - if (!(pteval & _PAGE_P)) { - if (vpsr & IA64_PSR_IC) { - vcpu_set_isr(v, misr.val); - dtlb_fault(v, vadr); - } else { - nested_dtlb(v); - } - } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) { - thash_purge_and_insert(v, pteval, itir, - vadr, D_TLB); - } else if (vpsr & IA64_PSR_IC) { - vcpu_set_isr(v, misr.val); - dtlb_fault(v, vadr); - } else { - nested_dtlb(v); - } - } else { - /* Can't read VHPT. */ - if (vpsr & IA64_PSR_IC) { - vcpu_set_isr(v, misr.val); - dvhpt_fault(v, vadr); - } else { - nested_dtlb(v); - } - } - } else if (type == I_TLB) { - if (!(vpsr & IA64_PSR_IC)) - misr.ni = 1; - if (!vhpt_enabled(v, vadr, INST_REF)) { - vcpu_set_isr(v, misr.val); - alt_itlb(v, vadr); - return; - } - - vpta.val = vcpu_get_pta(v); - - vhpt_adr = vcpu_thash(v, vadr); - if (!guest_vhpt_lookup(vhpt_adr, &pteval)) { - /* VHPT successfully read. 
*/ - if (pteval & _PAGE_P) { - if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) { - vcpu_set_isr(v, misr.val); - itlb_fault(v, vadr); - return ; - } - rr = vcpu_get_rr(v, vadr); - itir = rr & (RR_RID_MASK | RR_PS_MASK); - thash_purge_and_insert(v, pteval, itir, - vadr, I_TLB); - } else { - vcpu_set_isr(v, misr.val); - inst_page_not_present(v, vadr); - } - } else { - vcpu_set_isr(v, misr.val); - ivhpt_fault(v, vadr); - } - } -} - -void kvm_vexirq(struct kvm_vcpu *vcpu) -{ - u64 vpsr, isr; - struct kvm_pt_regs *regs; - - regs = vcpu_regs(vcpu); - vpsr = VCPU(vcpu, vpsr); - isr = vpsr & IA64_PSR_RI; - reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/ -} - -void kvm_ia64_handle_irq(struct kvm_vcpu *v) -{ - struct exit_ctl_data *p = &v->arch.exit_data; - long psr; - - local_irq_save(psr); - p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT; - vmm_transition(v); - local_irq_restore(psr); - - VMX(v, timer_check) = 1; - -} - -static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos) -{ - u64 oldrid, moldrid, oldpsbits, vaddr; - struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos]; - vaddr = p->vaddr; - - oldrid = VMX(v, vrr[0]); - VMX(v, vrr[0]) = p->rr; - oldpsbits = VMX(v, psbits[0]); - VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]); - moldrid = ia64_get_rr(0x0); - ia64_set_rr(0x0, vrrtomrr(p->rr)); - ia64_srlz_d(); - - vaddr = PAGEALIGN(vaddr, p->ps); - thash_purge_entries_remote(v, vaddr, p->ps); - - VMX(v, vrr[0]) = oldrid; - VMX(v, psbits[0]) = oldpsbits; - ia64_set_rr(0x0, moldrid); - ia64_dv_serialize_data(); -} - -static void vcpu_do_resume(struct kvm_vcpu *vcpu) -{ - /*Re-init VHPT and VTLB once from resume*/ - vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES; - thash_init(&vcpu->arch.vhpt, VHPT_SHIFT); - vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES; - thash_init(&vcpu->arch.vtlb, VTLB_SHIFT); - - ia64_set_pta(vcpu->arch.vhpt.pta.val); -} - -static void vmm_sanity_check(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - - if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) { - panic_vm(vcpu, "Failed to do vmm sanity check," - "it maybe caused by crashed vmm!!\n\n"); - } -} - -static void kvm_do_resume_op(struct kvm_vcpu *vcpu) -{ - vmm_sanity_check(vcpu); /*Guarantee vcpu running on healthy vmm!*/ - - if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { - vcpu_do_resume(vcpu); - return; - } - - if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) { - thash_purge_all(vcpu); - return; - } - - if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) { - while (vcpu->arch.ptc_g_count > 0) - ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count); - } -} - -void vmm_transition(struct kvm_vcpu *vcpu) -{ - ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd, - 1, 0, 0, 0, 0, 0); - vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host); - ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd, - 1, 0, 0, 0, 0, 0); - kvm_do_resume_op(vcpu); -} - -void vmm_panic_handler(u64 vec) -{ - struct kvm_vcpu *vcpu = current_vcpu; - vmm_sanity = 0; - panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n", - vec2off[vec]); -} diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S deleted file mode 100644 index 30897d44d61e..000000000000 --- a/arch/ia64/kvm/trampoline.S +++ /dev/null @@ -1,1038 +0,0 @@ -/* Save all processor states - * - * Copyright (c) 2007 Fleming Feng - * Copyright (c) 2007 Anthony Xu - */ - -#include -#include "asm-offsets.h" - - -#define CTX(name) VMM_CTX_##name##_OFFSET - - /* - * r32: context_t base address - 
*/ -#define SAVE_BRANCH_REGS \ - add r2 = CTX(B0),r32; \ - add r3 = CTX(B1),r32; \ - mov r16 = b0; \ - mov r17 = b1; \ - ;; \ - st8 [r2]=r16,16; \ - st8 [r3]=r17,16; \ - ;; \ - mov r16 = b2; \ - mov r17 = b3; \ - ;; \ - st8 [r2]=r16,16; \ - st8 [r3]=r17,16; \ - ;; \ - mov r16 = b4; \ - mov r17 = b5; \ - ;; \ - st8 [r2]=r16; \ - st8 [r3]=r17; \ - ;; - - /* - * r33: context_t base address - */ -#define RESTORE_BRANCH_REGS \ - add r2 = CTX(B0),r33; \ - add r3 = CTX(B1),r33; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov b0 = r16; \ - mov b1 = r17; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov b2 = r16; \ - mov b3 = r17; \ - ;; \ - ld8 r16=[r2]; \ - ld8 r17=[r3]; \ - ;; \ - mov b4=r16; \ - mov b5=r17; \ - ;; - - - /* - * r32: context_t base address - * bsw == 1 - * Save all bank1 general registers, r4 ~ r7 - */ -#define SAVE_GENERAL_REGS \ - add r2=CTX(R4),r32; \ - add r3=CTX(R5),r32; \ - ;; \ -.mem.offset 0,0; \ - st8.spill [r2]=r4,16; \ -.mem.offset 8,0; \ - st8.spill [r3]=r5,16; \ - ;; \ -.mem.offset 0,0; \ - st8.spill [r2]=r6,48; \ -.mem.offset 8,0; \ - st8.spill [r3]=r7,48; \ - ;; \ -.mem.offset 0,0; \ - st8.spill [r2]=r12; \ -.mem.offset 8,0; \ - st8.spill [r3]=r13; \ - ;; - - /* - * r33: context_t base address - * bsw == 1 - */ -#define RESTORE_GENERAL_REGS \ - add r2=CTX(R4),r33; \ - add r3=CTX(R5),r33; \ - ;; \ - ld8.fill r4=[r2],16; \ - ld8.fill r5=[r3],16; \ - ;; \ - ld8.fill r6=[r2],48; \ - ld8.fill r7=[r3],48; \ - ;; \ - ld8.fill r12=[r2]; \ - ld8.fill r13 =[r3]; \ - ;; - - - - - /* - * r32: context_t base address - */ -#define SAVE_KERNEL_REGS \ - add r2 = CTX(KR0),r32; \ - add r3 = CTX(KR1),r32; \ - mov r16 = ar.k0; \ - mov r17 = ar.k1; \ - ;; \ - st8 [r2] = r16,16; \ - st8 [r3] = r17,16; \ - ;; \ - mov r16 = ar.k2; \ - mov r17 = ar.k3; \ - ;; \ - st8 [r2] = r16,16; \ - st8 [r3] = r17,16; \ - ;; \ - mov r16 = ar.k4; \ - mov r17 = ar.k5; \ - ;; \ - st8 [r2] = r16,16; \ - st8 [r3] = r17,16; \ - ;; \ - mov r16 = ar.k6; \ - mov r17 = ar.k7; \ - ;; \ - st8 [r2] = r16; \ - st8 [r3] = r17; \ - ;; - - - - /* - * r33: context_t base address - */ -#define RESTORE_KERNEL_REGS \ - add r2 = CTX(KR0),r33; \ - add r3 = CTX(KR1),r33; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov ar.k0=r16; \ - mov ar.k1=r17; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov ar.k2=r16; \ - mov ar.k3=r17; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov ar.k4=r16; \ - mov ar.k5=r17; \ - ;; \ - ld8 r16=[r2],16; \ - ld8 r17=[r3],16; \ - ;; \ - mov ar.k6=r16; \ - mov ar.k7=r17; \ - ;; - - - - /* - * r32: context_t base address - */ -#define SAVE_APP_REGS \ - add r2 = CTX(BSPSTORE),r32; \ - mov r16 = ar.bspstore; \ - ;; \ - st8 [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\ - mov r16 = ar.rnat; \ - ;; \ - st8 [r2] = r16,CTX(FCR)-CTX(RNAT); \ - mov r16 = ar.fcr; \ - ;; \ - st8 [r2] = r16,CTX(EFLAG)-CTX(FCR); \ - mov r16 = ar.eflag; \ - ;; \ - st8 [r2] = r16,CTX(CFLG)-CTX(EFLAG); \ - mov r16 = ar.cflg; \ - ;; \ - st8 [r2] = r16,CTX(FSR)-CTX(CFLG); \ - mov r16 = ar.fsr; \ - ;; \ - st8 [r2] = r16,CTX(FIR)-CTX(FSR); \ - mov r16 = ar.fir; \ - ;; \ - st8 [r2] = r16,CTX(FDR)-CTX(FIR); \ - mov r16 = ar.fdr; \ - ;; \ - st8 [r2] = r16,CTX(UNAT)-CTX(FDR); \ - mov r16 = ar.unat; \ - ;; \ - st8 [r2] = r16,CTX(FPSR)-CTX(UNAT); \ - mov r16 = ar.fpsr; \ - ;; \ - st8 [r2] = r16,CTX(PFS)-CTX(FPSR); \ - mov r16 = ar.pfs; \ - ;; \ - st8 [r2] = r16,CTX(LC)-CTX(PFS); \ - mov r16 = ar.lc; \ - ;; \ - st8 [r2] = r16; \ - ;; - - /* - * r33: context_t base address - */ 
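The CTX() macro above turns a field name into a VMM_CTX_*_OFFSET constant generated by asm-offsets.h, and the SAVE_*/RESTORE_* macro pairs walk context_t with a single pointer that is post-incremented by the difference between neighbouring offsets (e.g. CTX(RNAT)-CTX(BSPSTORE) in SAVE_APP_REGS). A minimal C model of that offset-delta walk is sketched here; struct vmm_context and its field set are hypothetical stand-ins for illustration, not the real context_t layout.

/* Sketch only: a hypothetical context layout and an offsetof()-driven walk
 * that mirrors "st8 [r2] = r16, CTX(next)-CTX(cur)" in the macros above.  */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vmm_context {                  /* hypothetical stand-in for context_t */
	uint64_t bspstore;
	uint64_t rnat;
	uint64_t unat;
	uint64_t fpsr;
	uint64_t pfs;
	uint64_t lc;
};

/* Analogue of the VMM_CTX_*_OFFSET constants emitted by asm-offsets.h. */
static const size_t ctx_off[] = {
	offsetof(struct vmm_context, bspstore),
	offsetof(struct vmm_context, rnat),
	offsetof(struct vmm_context, unat),
	offsetof(struct vmm_context, fpsr),
	offsetof(struct vmm_context, pfs),
	offsetof(struct vmm_context, lc),
};

#define CTX_SLOTS (sizeof(ctx_off) / sizeof(ctx_off[0]))

/* Walk the structure with one byte pointer, advancing by offset deltas,
 * the way r2 is advanced in the assembly save macros.                    */
static void save_app_regs(struct vmm_context *ctx, const uint64_t *live)
{
	unsigned char *p = (unsigned char *)ctx + ctx_off[0];
	size_t i;

	for (i = 0; i + 1 < CTX_SLOTS; i++) {
		*(uint64_t *)p = live[i];          /* st8 [r2] = r16, <delta> */
		p += ctx_off[i + 1] - ctx_off[i];  /* post-increment by delta */
	}
	*(uint64_t *)p = live[CTX_SLOTS - 1];      /* final st8, no increment */
}

int main(void)
{
	struct vmm_context ctx = { 0 };
	uint64_t live[CTX_SLOTS] = { 10, 11, 12, 13, 14, 15 };

	save_app_regs(&ctx, live);
	printf("ar.lc slot holds %llu\n", (unsigned long long)ctx.lc);
	return 0;
}

The RESTORE_APP_REGS macro documented by the comment just above then performs the mirror-image walk, loading each slot with ld8 and moving it back into the corresponding ar.* register.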
-#define RESTORE_APP_REGS \ - add r2=CTX(BSPSTORE),r33; \ - ;; \ - ld8 r16=[r2],CTX(RNAT)-CTX(BSPSTORE); \ - ;; \ - mov ar.bspstore=r16; \ - ld8 r16=[r2],CTX(FCR)-CTX(RNAT); \ - ;; \ - mov ar.rnat=r16; \ - ld8 r16=[r2],CTX(EFLAG)-CTX(FCR); \ - ;; \ - mov ar.fcr=r16; \ - ld8 r16=[r2],CTX(CFLG)-CTX(EFLAG); \ - ;; \ - mov ar.eflag=r16; \ - ld8 r16=[r2],CTX(FSR)-CTX(CFLG); \ - ;; \ - mov ar.cflg=r16; \ - ld8 r16=[r2],CTX(FIR)-CTX(FSR); \ - ;; \ - mov ar.fsr=r16; \ - ld8 r16=[r2],CTX(FDR)-CTX(FIR); \ - ;; \ - mov ar.fir=r16; \ - ld8 r16=[r2],CTX(UNAT)-CTX(FDR); \ - ;; \ - mov ar.fdr=r16; \ - ld8 r16=[r2],CTX(FPSR)-CTX(UNAT); \ - ;; \ - mov ar.unat=r16; \ - ld8 r16=[r2],CTX(PFS)-CTX(FPSR); \ - ;; \ - mov ar.fpsr=r16; \ - ld8 r16=[r2],CTX(LC)-CTX(PFS); \ - ;; \ - mov ar.pfs=r16; \ - ld8 r16=[r2]; \ - ;; \ - mov ar.lc=r16; \ - ;; - - /* - * r32: context_t base address - */ -#define SAVE_CTL_REGS \ - add r2 = CTX(DCR),r32; \ - mov r16 = cr.dcr; \ - ;; \ - st8 [r2] = r16,CTX(IVA)-CTX(DCR); \ - ;; \ - mov r16 = cr.iva; \ - ;; \ - st8 [r2] = r16,CTX(PTA)-CTX(IVA); \ - ;; \ - mov r16 = cr.pta; \ - ;; \ - st8 [r2] = r16 ; \ - ;; - - /* - * r33: context_t base address - */ -#define RESTORE_CTL_REGS \ - add r2 = CTX(DCR),r33; \ - ;; \ - ld8 r16 = [r2],CTX(IVA)-CTX(DCR); \ - ;; \ - mov cr.dcr = r16; \ - dv_serialize_data; \ - ;; \ - ld8 r16 = [r2],CTX(PTA)-CTX(IVA); \ - ;; \ - mov cr.iva = r16; \ - dv_serialize_data; \ - ;; \ - ld8 r16 = [r2]; \ - ;; \ - mov cr.pta = r16; \ - dv_serialize_data; \ - ;; - - - /* - * r32: context_t base address - */ -#define SAVE_REGION_REGS \ - add r2=CTX(RR0),r32; \ - mov r16=rr[r0]; \ - dep.z r18=1,61,3; \ - ;; \ - st8 [r2]=r16,8; \ - mov r17=rr[r18]; \ - dep.z r18=2,61,3; \ - ;; \ - st8 [r2]=r17,8; \ - mov r16=rr[r18]; \ - dep.z r18=3,61,3; \ - ;; \ - st8 [r2]=r16,8; \ - mov r17=rr[r18]; \ - dep.z r18=4,61,3; \ - ;; \ - st8 [r2]=r17,8; \ - mov r16=rr[r18]; \ - dep.z r18=5,61,3; \ - ;; \ - st8 [r2]=r16,8; \ - mov r17=rr[r18]; \ - dep.z r18=7,61,3; \ - ;; \ - st8 [r2]=r17,16; \ - mov r16=rr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - ;; - - /* - * r33:context_t base address - */ -#define RESTORE_REGION_REGS \ - add r2=CTX(RR0),r33;\ - mov r18=r0; \ - ;; \ - ld8 r20=[r2],8; \ - ;; /* rr0 */ \ - ld8 r21=[r2],8; \ - ;; /* rr1 */ \ - ld8 r22=[r2],8; \ - ;; /* rr2 */ \ - ld8 r23=[r2],8; \ - ;; /* rr3 */ \ - ld8 r24=[r2],8; \ - ;; /* rr4 */ \ - ld8 r25=[r2],16; \ - ;; /* rr5 */ \ - ld8 r27=[r2]; \ - ;; /* rr7 */ \ - mov rr[r18]=r20; \ - dep.z r18=1,61,3; \ - ;; /* rr1 */ \ - mov rr[r18]=r21; \ - dep.z r18=2,61,3; \ - ;; /* rr2 */ \ - mov rr[r18]=r22; \ - dep.z r18=3,61,3; \ - ;; /* rr3 */ \ - mov rr[r18]=r23; \ - dep.z r18=4,61,3; \ - ;; /* rr4 */ \ - mov rr[r18]=r24; \ - dep.z r18=5,61,3; \ - ;; /* rr5 */ \ - mov rr[r18]=r25; \ - dep.z r18=7,61,3; \ - ;; /* rr7 */ \ - mov rr[r18]=r27; \ - ;; \ - srlz.i; \ - ;; - - - - /* - * r32: context_t base address - * r36~r39:scratch registers - */ -#define SAVE_DEBUG_REGS \ - add r2=CTX(IBR0),r32; \ - add r3=CTX(DBR0),r32; \ - mov r16=ibr[r0]; \ - mov r17=dbr[r0]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=1,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=2,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=2,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=3,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; 
\ - st8 [r3]=r17,8; \ - add r18=4,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=5,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=6,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - add r18=7,r0; \ - ;; \ - mov r16=ibr[r18]; \ - mov r17=dbr[r18]; \ - ;; \ - st8 [r2]=r16,8; \ - st8 [r3]=r17,8; \ - ;; - - -/* - * r33: point to context_t structure - * ar.lc are corrupted. - */ -#define RESTORE_DEBUG_REGS \ - add r2=CTX(IBR0),r33; \ - add r3=CTX(DBR0),r33; \ - mov r16=7; \ - mov r17=r0; \ - ;; \ - mov ar.lc = r16; \ - ;; \ -1: \ - ld8 r18=[r2],8; \ - ld8 r19=[r3],8; \ - ;; \ - mov ibr[r17]=r18; \ - mov dbr[r17]=r19; \ - ;; \ - srlz.i; \ - ;; \ - add r17=1,r17; \ - br.cloop.sptk 1b; \ - ;; - - - /* - * r32: context_t base address - */ -#define SAVE_FPU_LOW \ - add r2=CTX(F2),r32; \ - add r3=CTX(F3),r32; \ - ;; \ - stf.spill.nta [r2]=f2,32; \ - stf.spill.nta [r3]=f3,32; \ - ;; \ - stf.spill.nta [r2]=f4,32; \ - stf.spill.nta [r3]=f5,32; \ - ;; \ - stf.spill.nta [r2]=f6,32; \ - stf.spill.nta [r3]=f7,32; \ - ;; \ - stf.spill.nta [r2]=f8,32; \ - stf.spill.nta [r3]=f9,32; \ - ;; \ - stf.spill.nta [r2]=f10,32; \ - stf.spill.nta [r3]=f11,32; \ - ;; \ - stf.spill.nta [r2]=f12,32; \ - stf.spill.nta [r3]=f13,32; \ - ;; \ - stf.spill.nta [r2]=f14,32; \ - stf.spill.nta [r3]=f15,32; \ - ;; \ - stf.spill.nta [r2]=f16,32; \ - stf.spill.nta [r3]=f17,32; \ - ;; \ - stf.spill.nta [r2]=f18,32; \ - stf.spill.nta [r3]=f19,32; \ - ;; \ - stf.spill.nta [r2]=f20,32; \ - stf.spill.nta [r3]=f21,32; \ - ;; \ - stf.spill.nta [r2]=f22,32; \ - stf.spill.nta [r3]=f23,32; \ - ;; \ - stf.spill.nta [r2]=f24,32; \ - stf.spill.nta [r3]=f25,32; \ - ;; \ - stf.spill.nta [r2]=f26,32; \ - stf.spill.nta [r3]=f27,32; \ - ;; \ - stf.spill.nta [r2]=f28,32; \ - stf.spill.nta [r3]=f29,32; \ - ;; \ - stf.spill.nta [r2]=f30; \ - stf.spill.nta [r3]=f31; \ - ;; - - /* - * r32: context_t base address - */ -#define SAVE_FPU_HIGH \ - add r2=CTX(F32),r32; \ - add r3=CTX(F33),r32; \ - ;; \ - stf.spill.nta [r2]=f32,32; \ - stf.spill.nta [r3]=f33,32; \ - ;; \ - stf.spill.nta [r2]=f34,32; \ - stf.spill.nta [r3]=f35,32; \ - ;; \ - stf.spill.nta [r2]=f36,32; \ - stf.spill.nta [r3]=f37,32; \ - ;; \ - stf.spill.nta [r2]=f38,32; \ - stf.spill.nta [r3]=f39,32; \ - ;; \ - stf.spill.nta [r2]=f40,32; \ - stf.spill.nta [r3]=f41,32; \ - ;; \ - stf.spill.nta [r2]=f42,32; \ - stf.spill.nta [r3]=f43,32; \ - ;; \ - stf.spill.nta [r2]=f44,32; \ - stf.spill.nta [r3]=f45,32; \ - ;; \ - stf.spill.nta [r2]=f46,32; \ - stf.spill.nta [r3]=f47,32; \ - ;; \ - stf.spill.nta [r2]=f48,32; \ - stf.spill.nta [r3]=f49,32; \ - ;; \ - stf.spill.nta [r2]=f50,32; \ - stf.spill.nta [r3]=f51,32; \ - ;; \ - stf.spill.nta [r2]=f52,32; \ - stf.spill.nta [r3]=f53,32; \ - ;; \ - stf.spill.nta [r2]=f54,32; \ - stf.spill.nta [r3]=f55,32; \ - ;; \ - stf.spill.nta [r2]=f56,32; \ - stf.spill.nta [r3]=f57,32; \ - ;; \ - stf.spill.nta [r2]=f58,32; \ - stf.spill.nta [r3]=f59,32; \ - ;; \ - stf.spill.nta [r2]=f60,32; \ - stf.spill.nta [r3]=f61,32; \ - ;; \ - stf.spill.nta [r2]=f62,32; \ - stf.spill.nta [r3]=f63,32; \ - ;; \ - stf.spill.nta [r2]=f64,32; \ - stf.spill.nta [r3]=f65,32; \ - ;; \ - stf.spill.nta [r2]=f66,32; \ - stf.spill.nta [r3]=f67,32; \ - ;; \ - stf.spill.nta [r2]=f68,32; \ - stf.spill.nta [r3]=f69,32; \ - ;; \ - stf.spill.nta [r2]=f70,32; \ - stf.spill.nta [r3]=f71,32; \ - ;; \ - stf.spill.nta 
[r2]=f72,32; \ - stf.spill.nta [r3]=f73,32; \ - ;; \ - stf.spill.nta [r2]=f74,32; \ - stf.spill.nta [r3]=f75,32; \ - ;; \ - stf.spill.nta [r2]=f76,32; \ - stf.spill.nta [r3]=f77,32; \ - ;; \ - stf.spill.nta [r2]=f78,32; \ - stf.spill.nta [r3]=f79,32; \ - ;; \ - stf.spill.nta [r2]=f80,32; \ - stf.spill.nta [r3]=f81,32; \ - ;; \ - stf.spill.nta [r2]=f82,32; \ - stf.spill.nta [r3]=f83,32; \ - ;; \ - stf.spill.nta [r2]=f84,32; \ - stf.spill.nta [r3]=f85,32; \ - ;; \ - stf.spill.nta [r2]=f86,32; \ - stf.spill.nta [r3]=f87,32; \ - ;; \ - stf.spill.nta [r2]=f88,32; \ - stf.spill.nta [r3]=f89,32; \ - ;; \ - stf.spill.nta [r2]=f90,32; \ - stf.spill.nta [r3]=f91,32; \ - ;; \ - stf.spill.nta [r2]=f92,32; \ - stf.spill.nta [r3]=f93,32; \ - ;; \ - stf.spill.nta [r2]=f94,32; \ - stf.spill.nta [r3]=f95,32; \ - ;; \ - stf.spill.nta [r2]=f96,32; \ - stf.spill.nta [r3]=f97,32; \ - ;; \ - stf.spill.nta [r2]=f98,32; \ - stf.spill.nta [r3]=f99,32; \ - ;; \ - stf.spill.nta [r2]=f100,32; \ - stf.spill.nta [r3]=f101,32; \ - ;; \ - stf.spill.nta [r2]=f102,32; \ - stf.spill.nta [r3]=f103,32; \ - ;; \ - stf.spill.nta [r2]=f104,32; \ - stf.spill.nta [r3]=f105,32; \ - ;; \ - stf.spill.nta [r2]=f106,32; \ - stf.spill.nta [r3]=f107,32; \ - ;; \ - stf.spill.nta [r2]=f108,32; \ - stf.spill.nta [r3]=f109,32; \ - ;; \ - stf.spill.nta [r2]=f110,32; \ - stf.spill.nta [r3]=f111,32; \ - ;; \ - stf.spill.nta [r2]=f112,32; \ - stf.spill.nta [r3]=f113,32; \ - ;; \ - stf.spill.nta [r2]=f114,32; \ - stf.spill.nta [r3]=f115,32; \ - ;; \ - stf.spill.nta [r2]=f116,32; \ - stf.spill.nta [r3]=f117,32; \ - ;; \ - stf.spill.nta [r2]=f118,32; \ - stf.spill.nta [r3]=f119,32; \ - ;; \ - stf.spill.nta [r2]=f120,32; \ - stf.spill.nta [r3]=f121,32; \ - ;; \ - stf.spill.nta [r2]=f122,32; \ - stf.spill.nta [r3]=f123,32; \ - ;; \ - stf.spill.nta [r2]=f124,32; \ - stf.spill.nta [r3]=f125,32; \ - ;; \ - stf.spill.nta [r2]=f126; \ - stf.spill.nta [r3]=f127; \ - ;; - - /* - * r33: point to context_t structure - */ -#define RESTORE_FPU_LOW \ - add r2 = CTX(F2), r33; \ - add r3 = CTX(F3), r33; \ - ;; \ - ldf.fill.nta f2 = [r2], 32; \ - ldf.fill.nta f3 = [r3], 32; \ - ;; \ - ldf.fill.nta f4 = [r2], 32; \ - ldf.fill.nta f5 = [r3], 32; \ - ;; \ - ldf.fill.nta f6 = [r2], 32; \ - ldf.fill.nta f7 = [r3], 32; \ - ;; \ - ldf.fill.nta f8 = [r2], 32; \ - ldf.fill.nta f9 = [r3], 32; \ - ;; \ - ldf.fill.nta f10 = [r2], 32; \ - ldf.fill.nta f11 = [r3], 32; \ - ;; \ - ldf.fill.nta f12 = [r2], 32; \ - ldf.fill.nta f13 = [r3], 32; \ - ;; \ - ldf.fill.nta f14 = [r2], 32; \ - ldf.fill.nta f15 = [r3], 32; \ - ;; \ - ldf.fill.nta f16 = [r2], 32; \ - ldf.fill.nta f17 = [r3], 32; \ - ;; \ - ldf.fill.nta f18 = [r2], 32; \ - ldf.fill.nta f19 = [r3], 32; \ - ;; \ - ldf.fill.nta f20 = [r2], 32; \ - ldf.fill.nta f21 = [r3], 32; \ - ;; \ - ldf.fill.nta f22 = [r2], 32; \ - ldf.fill.nta f23 = [r3], 32; \ - ;; \ - ldf.fill.nta f24 = [r2], 32; \ - ldf.fill.nta f25 = [r3], 32; \ - ;; \ - ldf.fill.nta f26 = [r2], 32; \ - ldf.fill.nta f27 = [r3], 32; \ - ;; \ - ldf.fill.nta f28 = [r2], 32; \ - ldf.fill.nta f29 = [r3], 32; \ - ;; \ - ldf.fill.nta f30 = [r2], 32; \ - ldf.fill.nta f31 = [r3], 32; \ - ;; - - - - /* - * r33: point to context_t structure - */ -#define RESTORE_FPU_HIGH \ - add r2 = CTX(F32), r33; \ - add r3 = CTX(F33), r33; \ - ;; \ - ldf.fill.nta f32 = [r2], 32; \ - ldf.fill.nta f33 = [r3], 32; \ - ;; \ - ldf.fill.nta f34 = [r2], 32; \ - ldf.fill.nta f35 = [r3], 32; \ - ;; \ - ldf.fill.nta f36 = [r2], 32; \ - ldf.fill.nta f37 = [r3], 32; \ - ;; \ - ldf.fill.nta f38 = [r2], 32; 
\ - ldf.fill.nta f39 = [r3], 32; \ - ;; \ - ldf.fill.nta f40 = [r2], 32; \ - ldf.fill.nta f41 = [r3], 32; \ - ;; \ - ldf.fill.nta f42 = [r2], 32; \ - ldf.fill.nta f43 = [r3], 32; \ - ;; \ - ldf.fill.nta f44 = [r2], 32; \ - ldf.fill.nta f45 = [r3], 32; \ - ;; \ - ldf.fill.nta f46 = [r2], 32; \ - ldf.fill.nta f47 = [r3], 32; \ - ;; \ - ldf.fill.nta f48 = [r2], 32; \ - ldf.fill.nta f49 = [r3], 32; \ - ;; \ - ldf.fill.nta f50 = [r2], 32; \ - ldf.fill.nta f51 = [r3], 32; \ - ;; \ - ldf.fill.nta f52 = [r2], 32; \ - ldf.fill.nta f53 = [r3], 32; \ - ;; \ - ldf.fill.nta f54 = [r2], 32; \ - ldf.fill.nta f55 = [r3], 32; \ - ;; \ - ldf.fill.nta f56 = [r2], 32; \ - ldf.fill.nta f57 = [r3], 32; \ - ;; \ - ldf.fill.nta f58 = [r2], 32; \ - ldf.fill.nta f59 = [r3], 32; \ - ;; \ - ldf.fill.nta f60 = [r2], 32; \ - ldf.fill.nta f61 = [r3], 32; \ - ;; \ - ldf.fill.nta f62 = [r2], 32; \ - ldf.fill.nta f63 = [r3], 32; \ - ;; \ - ldf.fill.nta f64 = [r2], 32; \ - ldf.fill.nta f65 = [r3], 32; \ - ;; \ - ldf.fill.nta f66 = [r2], 32; \ - ldf.fill.nta f67 = [r3], 32; \ - ;; \ - ldf.fill.nta f68 = [r2], 32; \ - ldf.fill.nta f69 = [r3], 32; \ - ;; \ - ldf.fill.nta f70 = [r2], 32; \ - ldf.fill.nta f71 = [r3], 32; \ - ;; \ - ldf.fill.nta f72 = [r2], 32; \ - ldf.fill.nta f73 = [r3], 32; \ - ;; \ - ldf.fill.nta f74 = [r2], 32; \ - ldf.fill.nta f75 = [r3], 32; \ - ;; \ - ldf.fill.nta f76 = [r2], 32; \ - ldf.fill.nta f77 = [r3], 32; \ - ;; \ - ldf.fill.nta f78 = [r2], 32; \ - ldf.fill.nta f79 = [r3], 32; \ - ;; \ - ldf.fill.nta f80 = [r2], 32; \ - ldf.fill.nta f81 = [r3], 32; \ - ;; \ - ldf.fill.nta f82 = [r2], 32; \ - ldf.fill.nta f83 = [r3], 32; \ - ;; \ - ldf.fill.nta f84 = [r2], 32; \ - ldf.fill.nta f85 = [r3], 32; \ - ;; \ - ldf.fill.nta f86 = [r2], 32; \ - ldf.fill.nta f87 = [r3], 32; \ - ;; \ - ldf.fill.nta f88 = [r2], 32; \ - ldf.fill.nta f89 = [r3], 32; \ - ;; \ - ldf.fill.nta f90 = [r2], 32; \ - ldf.fill.nta f91 = [r3], 32; \ - ;; \ - ldf.fill.nta f92 = [r2], 32; \ - ldf.fill.nta f93 = [r3], 32; \ - ;; \ - ldf.fill.nta f94 = [r2], 32; \ - ldf.fill.nta f95 = [r3], 32; \ - ;; \ - ldf.fill.nta f96 = [r2], 32; \ - ldf.fill.nta f97 = [r3], 32; \ - ;; \ - ldf.fill.nta f98 = [r2], 32; \ - ldf.fill.nta f99 = [r3], 32; \ - ;; \ - ldf.fill.nta f100 = [r2], 32; \ - ldf.fill.nta f101 = [r3], 32; \ - ;; \ - ldf.fill.nta f102 = [r2], 32; \ - ldf.fill.nta f103 = [r3], 32; \ - ;; \ - ldf.fill.nta f104 = [r2], 32; \ - ldf.fill.nta f105 = [r3], 32; \ - ;; \ - ldf.fill.nta f106 = [r2], 32; \ - ldf.fill.nta f107 = [r3], 32; \ - ;; \ - ldf.fill.nta f108 = [r2], 32; \ - ldf.fill.nta f109 = [r3], 32; \ - ;; \ - ldf.fill.nta f110 = [r2], 32; \ - ldf.fill.nta f111 = [r3], 32; \ - ;; \ - ldf.fill.nta f112 = [r2], 32; \ - ldf.fill.nta f113 = [r3], 32; \ - ;; \ - ldf.fill.nta f114 = [r2], 32; \ - ldf.fill.nta f115 = [r3], 32; \ - ;; \ - ldf.fill.nta f116 = [r2], 32; \ - ldf.fill.nta f117 = [r3], 32; \ - ;; \ - ldf.fill.nta f118 = [r2], 32; \ - ldf.fill.nta f119 = [r3], 32; \ - ;; \ - ldf.fill.nta f120 = [r2], 32; \ - ldf.fill.nta f121 = [r3], 32; \ - ;; \ - ldf.fill.nta f122 = [r2], 32; \ - ldf.fill.nta f123 = [r3], 32; \ - ;; \ - ldf.fill.nta f124 = [r2], 32; \ - ldf.fill.nta f125 = [r3], 32; \ - ;; \ - ldf.fill.nta f126 = [r2], 32; \ - ldf.fill.nta f127 = [r3], 32; \ - ;; - - /* - * r32: context_t base address - */ -#define SAVE_PTK_REGS \ - add r2=CTX(PKR0), r32; \ - mov r16=7; \ - ;; \ - mov ar.lc=r16; \ - mov r17=r0; \ - ;; \ -1: \ - mov r18=pkr[r17]; \ - ;; \ - srlz.i; \ - ;; \ - st8 [r2]=r18, 8; \ - ;; \ - add r17 =1,r17; \ - ;; \ - 
br.cloop.sptk 1b; \ - ;; - -/* - * r33: point to context_t structure - * ar.lc are corrupted. - */ -#define RESTORE_PTK_REGS \ - add r2=CTX(PKR0), r33; \ - mov r16=7; \ - ;; \ - mov ar.lc=r16; \ - mov r17=r0; \ - ;; \ -1: \ - ld8 r18=[r2], 8; \ - ;; \ - mov pkr[r17]=r18; \ - ;; \ - srlz.i; \ - ;; \ - add r17 =1,r17; \ - ;; \ - br.cloop.sptk 1b; \ - ;; - - -/* - * void vmm_trampoline( context_t * from, - * context_t * to) - * - * from: r32 - * to: r33 - * note: interrupt disabled before call this function. - */ -GLOBAL_ENTRY(vmm_trampoline) - mov r16 = psr - adds r2 = CTX(PSR), r32 - ;; - st8 [r2] = r16, 8 // psr - mov r17 = pr - ;; - st8 [r2] = r17, 8 // pr - mov r18 = ar.unat - ;; - st8 [r2] = r18 - mov r17 = ar.rsc - ;; - adds r2 = CTX(RSC),r32 - ;; - st8 [r2]= r17 - mov ar.rsc =0 - flushrs - ;; - SAVE_GENERAL_REGS - ;; - SAVE_KERNEL_REGS - ;; - SAVE_APP_REGS - ;; - SAVE_BRANCH_REGS - ;; - SAVE_CTL_REGS - ;; - SAVE_REGION_REGS - ;; - //SAVE_DEBUG_REGS - ;; - rsm psr.dfl - ;; - srlz.d - ;; - SAVE_FPU_LOW - ;; - rsm psr.dfh - ;; - srlz.d - ;; - SAVE_FPU_HIGH - ;; - SAVE_PTK_REGS - ;; - RESTORE_PTK_REGS - ;; - RESTORE_FPU_HIGH - ;; - RESTORE_FPU_LOW - ;; - //RESTORE_DEBUG_REGS - ;; - RESTORE_REGION_REGS - ;; - RESTORE_CTL_REGS - ;; - RESTORE_BRANCH_REGS - ;; - RESTORE_APP_REGS - ;; - RESTORE_KERNEL_REGS - ;; - RESTORE_GENERAL_REGS - ;; - adds r2=CTX(PSR), r33 - ;; - ld8 r16=[r2], 8 // psr - ;; - mov psr.l=r16 - ;; - srlz.d - ;; - ld8 r16=[r2], 8 // pr - ;; - mov pr =r16,-1 - ld8 r16=[r2] // unat - ;; - mov ar.unat=r16 - ;; - adds r2=CTX(RSC),r33 - ;; - ld8 r16 =[r2] - ;; - mov ar.rsc = r16 - ;; - br.ret.sptk.few b0 -END(vmm_trampoline) diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c deleted file mode 100644 index 958815c9787d..000000000000 --- a/arch/ia64/kvm/vcpu.c +++ /dev/null @@ -1,2209 +0,0 @@ -/* - * kvm_vcpu.c: handling all virtual cpu related thing. - * Copyright (c) 2005, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Shaofan Li (Susue Li) - * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) - * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) - * Xiantao Zhang - */ - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "asm-offsets.h" -#include "vcpu.h" - -/* - * Special notes: - * - Index by it/dt/rt sequence - * - Only existing mode transitions are allowed in this table - * - RSE is placed at lazy mode when emulating guest partial mode - * - If gva happens to be rr0 and rr4, only allowed case is identity - * mapping (gva=gpa), or panic! (How?) - */ -int mm_switch_table[8][8] = { - /* 2004/09/12(Kevin): Allow switch to self */ - /* - * (it,dt,rt): (0,0,0) -> (1,1,1) - * This kind of transition usually occurs in the very early - * stage of Linux boot up procedure. Another case is in efi - * and pal calls. 
(see "arch/ia64/kernel/head.S") - * - * (it,dt,rt): (0,0,0) -> (0,1,1) - * This kind of transition is found when OSYa exits efi boot - * service. Due to gva = gpa in this case (Same region), - * data access can be satisfied though itlb entry for physical - * emulation is hit. - */ - {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V}, - {0, 0, 0, 0, 0, 0, 0, 0}, - {0, 0, 0, 0, 0, 0, 0, 0}, - /* - * (it,dt,rt): (0,1,1) -> (1,1,1) - * This kind of transition is found in OSYa. - * - * (it,dt,rt): (0,1,1) -> (0,0,0) - * This kind of transition is found in OSYa - */ - {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V}, - /* (1,0,0)->(1,1,1) */ - {0, 0, 0, 0, 0, 0, 0, SW_P2V}, - /* - * (it,dt,rt): (1,0,1) -> (1,1,1) - * This kind of transition usually occurs when Linux returns - * from the low level TLB miss handlers. - * (see "arch/ia64/kernel/ivt.S") - */ - {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V}, - {0, 0, 0, 0, 0, 0, 0, 0}, - /* - * (it,dt,rt): (1,1,1) -> (1,0,1) - * This kind of transition usually occurs in Linux low level - * TLB miss handler. (see "arch/ia64/kernel/ivt.S") - * - * (it,dt,rt): (1,1,1) -> (0,0,0) - * This kind of transition usually occurs in pal and efi calls, - * which requires running in physical mode. - * (see "arch/ia64/kernel/head.S") - * (1,1,1)->(1,0,0) - */ - - {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF}, -}; - -void physical_mode_init(struct kvm_vcpu *vcpu) -{ - vcpu->arch.mode_flags = GUEST_IN_PHY; -} - -void switch_to_physical_rid(struct kvm_vcpu *vcpu) -{ - unsigned long psr; - - /* Save original virtual mode rr[0] and rr[4] */ - psr = ia64_clear_ic(); - ia64_set_rr(VRN0<arch.metaphysical_rr0); - ia64_srlz_d(); - ia64_set_rr(VRN4<arch.metaphysical_rr4); - ia64_srlz_d(); - - ia64_set_psr(psr); - return; -} - -void switch_to_virtual_rid(struct kvm_vcpu *vcpu) -{ - unsigned long psr; - - psr = ia64_clear_ic(); - ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0); - ia64_srlz_d(); - ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4); - ia64_srlz_d(); - ia64_set_psr(psr); - return; -} - -static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr) -{ - return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)]; -} - -void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, - struct ia64_psr new_psr) -{ - int act; - act = mm_switch_action(old_psr, new_psr); - switch (act) { - case SW_V2P: - /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n", - old_psr.val, new_psr.val);*/ - switch_to_physical_rid(vcpu); - /* - * Set rse to enforced lazy, to prevent active rse - *save/restor when guest physical mode. - */ - vcpu->arch.mode_flags |= GUEST_IN_PHY; - break; - case SW_P2V: - switch_to_virtual_rid(vcpu); - /* - * recover old mode which is saved when entering - * guest physical mode - */ - vcpu->arch.mode_flags &= ~GUEST_IN_PHY; - break; - case SW_SELF: - break; - case SW_NOP: - break; - default: - /* Sanity check */ - break; - } - return; -} - -/* - * In physical mode, insert tc/tr for region 0 and 4 uses - * RID[0] and RID[4] which is for physical mode emulation. - * However what those inserted tc/tr wants is rid for - * virtual mode. So original virtual rid needs to be restored - * before insert. - * - * Operations which required such switch include: - * - insertions (itc.*, itr.*) - * - purges (ptc.* and ptr.*) - * - tpa - * - tak - * - thash?, ttag? - * All above needs actual virtual rid for destination entry. 
- */ - -void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, - struct ia64_psr new_psr) -{ - - if ((old_psr.dt != new_psr.dt) - || (old_psr.it != new_psr.it) - || (old_psr.rt != new_psr.rt)) - switch_mm_mode(vcpu, old_psr, new_psr); - - return; -} - - -/* - * In physical mode, insert tc/tr for region 0 and 4 uses - * RID[0] and RID[4] which is for physical mode emulation. - * However what those inserted tc/tr wants is rid for - * virtual mode. So original virtual rid needs to be restored - * before insert. - * - * Operations which required such switch include: - * - insertions (itc.*, itr.*) - * - purges (ptc.* and ptr.*) - * - tpa - * - tak - * - thash?, ttag? - * All above needs actual virtual rid for destination entry. - */ - -void prepare_if_physical_mode(struct kvm_vcpu *vcpu) -{ - if (is_physical_mode(vcpu)) { - vcpu->arch.mode_flags |= GUEST_PHY_EMUL; - switch_to_virtual_rid(vcpu); - } - return; -} - -/* Recover always follows prepare */ -void recover_if_physical_mode(struct kvm_vcpu *vcpu) -{ - if (is_physical_mode(vcpu)) - switch_to_physical_rid(vcpu); - vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL; - return; -} - -#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x) - -static u16 gr_info[32] = { - 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */ - RPT(r1), RPT(r2), RPT(r3), - RPT(r4), RPT(r5), RPT(r6), RPT(r7), - RPT(r8), RPT(r9), RPT(r10), RPT(r11), - RPT(r12), RPT(r13), RPT(r14), RPT(r15), - RPT(r16), RPT(r17), RPT(r18), RPT(r19), - RPT(r20), RPT(r21), RPT(r22), RPT(r23), - RPT(r24), RPT(r25), RPT(r26), RPT(r27), - RPT(r28), RPT(r29), RPT(r30), RPT(r31) -}; - -#define IA64_FIRST_STACKED_GR 32 -#define IA64_FIRST_ROTATING_FR 32 - -static inline unsigned long -rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg) -{ - reg += rrb; - if (reg >= sor) - reg -= sor; - return reg; -} - -/* - * Return the (rotated) index for floating point register - * be in the REGNUM (REGNUM must range from 32-127, - * result is in the range from 0-95. - */ -static inline unsigned long fph_index(struct kvm_pt_regs *regs, - long regnum) -{ - unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f; - return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); -} - -/* - * The inverse of the above: given bspstore and the number of - * registers, calculate ar.bsp. 
- */ -static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr, - long num_regs) -{ - long delta = ia64_rse_slot_num(addr) + num_regs; - int i = 0; - - if (num_regs < 0) - delta -= 0x3e; - if (delta < 0) { - while (delta <= -0x3f) { - i--; - delta += 0x3f; - } - } else { - while (delta >= 0x3f) { - i++; - delta -= 0x3f; - } - } - - return addr + num_regs + i; -} - -static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, - unsigned long *val, int *nat) -{ - unsigned long *bsp, *addr, *rnat_addr, *bspstore; - unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; - unsigned long nat_mask; - unsigned long old_rsc, new_rsc; - long sof = (regs->cr_ifs) & 0x7f; - long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); - long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; - long ridx = r1 - 32; - - if (ridx < sor) - ridx = rotate_reg(sor, rrb_gr, ridx); - - old_rsc = ia64_getreg(_IA64_REG_AR_RSC); - new_rsc = old_rsc&(~(0x3)); - ia64_setreg(_IA64_REG_AR_RSC, new_rsc); - - bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); - bsp = kbs + (regs->loadrs >> 19); - - addr = kvm_rse_skip_regs(bsp, -sof + ridx); - nat_mask = 1UL << ia64_rse_slot_num(addr); - rnat_addr = ia64_rse_rnat_addr(addr); - - if (addr >= bspstore) { - ia64_flushrs(); - ia64_mf(); - bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); - } - *val = *addr; - if (nat) { - if (bspstore < rnat_addr) - *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT) - & nat_mask); - else - *nat = (int)!!((*rnat_addr) & nat_mask); - ia64_setreg(_IA64_REG_AR_RSC, old_rsc); - } -} - -void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, - unsigned long val, unsigned long nat) -{ - unsigned long *bsp, *bspstore, *addr, *rnat_addr; - unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET; - unsigned long nat_mask; - unsigned long old_rsc, new_rsc, psr; - unsigned long rnat; - long sof = (regs->cr_ifs) & 0x7f; - long sor = (((regs->cr_ifs >> 14) & 0xf) << 3); - long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; - long ridx = r1 - 32; - - if (ridx < sor) - ridx = rotate_reg(sor, rrb_gr, ridx); - - old_rsc = ia64_getreg(_IA64_REG_AR_RSC); - /* put RSC to lazy mode, and set loadrs 0 */ - new_rsc = old_rsc & (~0x3fff0003); - ia64_setreg(_IA64_REG_AR_RSC, new_rsc); - bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */ - - addr = kvm_rse_skip_regs(bsp, -sof + ridx); - nat_mask = 1UL << ia64_rse_slot_num(addr); - rnat_addr = ia64_rse_rnat_addr(addr); - - local_irq_save(psr); - bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); - if (addr >= bspstore) { - - ia64_flushrs(); - ia64_mf(); - *addr = val; - bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE); - rnat = ia64_getreg(_IA64_REG_AR_RNAT); - if (bspstore < rnat_addr) - rnat = rnat & (~nat_mask); - else - *rnat_addr = (*rnat_addr)&(~nat_mask); - - ia64_mf(); - ia64_loadrs(); - ia64_setreg(_IA64_REG_AR_RNAT, rnat); - } else { - rnat = ia64_getreg(_IA64_REG_AR_RNAT); - *addr = val; - if (bspstore < rnat_addr) - rnat = rnat&(~nat_mask); - else - *rnat_addr = (*rnat_addr) & (~nat_mask); - - ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); - ia64_setreg(_IA64_REG_AR_RNAT, rnat); - } - local_irq_restore(psr); - ia64_setreg(_IA64_REG_AR_RSC, old_rsc); -} - -void getreg(unsigned long regnum, unsigned long *val, - int *nat, struct kvm_pt_regs *regs) -{ - unsigned long addr, *unat; - if (regnum >= IA64_FIRST_STACKED_GR) { - get_rse_reg(regs, regnum, val, nat); - return; - } - - /* - * Now look at registers in [0-31] range and init correct UNAT - */ - addr = 
(unsigned long)regs; - unat = ®s->eml_unat; - - addr += gr_info[regnum]; - - *val = *(unsigned long *)addr; - /* - * do it only when requested - */ - if (nat) - *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL; -} - -void setreg(unsigned long regnum, unsigned long val, - int nat, struct kvm_pt_regs *regs) -{ - unsigned long addr; - unsigned long bitmask; - unsigned long *unat; - - /* - * First takes care of stacked registers - */ - if (regnum >= IA64_FIRST_STACKED_GR) { - set_rse_reg(regs, regnum, val, nat); - return; - } - - /* - * Now look at registers in [0-31] range and init correct UNAT - */ - addr = (unsigned long)regs; - unat = ®s->eml_unat; - /* - * add offset from base of struct - * and do it ! - */ - addr += gr_info[regnum]; - - *(unsigned long *)addr = val; - - /* - * We need to clear the corresponding UNAT bit to fully emulate the load - * UNAT bit_pos = GR[r3]{8:3} form EAS-2.4 - */ - bitmask = 1UL << ((addr >> 3) & 0x3f); - if (nat) - *unat |= bitmask; - else - *unat &= ~bitmask; - -} - -u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - unsigned long val; - - if (!reg) - return 0; - getreg(reg, &val, 0, regs); - return val; -} - -void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - long sof = (regs->cr_ifs) & 0x7f; - - if (!reg) - return; - if (reg >= sof + 32) - return; - setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/ -} - -void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval, - struct kvm_pt_regs *regs) -{ - /* Take floating register rotation into consideration*/ - if (regnum >= IA64_FIRST_ROTATING_FR) - regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); -#define CASE_FIXED_FP(reg) \ - case (reg) : \ - ia64_stf_spill(fpval, reg); \ - break - - switch (regnum) { - CASE_FIXED_FP(0); - CASE_FIXED_FP(1); - CASE_FIXED_FP(2); - CASE_FIXED_FP(3); - CASE_FIXED_FP(4); - CASE_FIXED_FP(5); - - CASE_FIXED_FP(6); - CASE_FIXED_FP(7); - CASE_FIXED_FP(8); - CASE_FIXED_FP(9); - CASE_FIXED_FP(10); - CASE_FIXED_FP(11); - - CASE_FIXED_FP(12); - CASE_FIXED_FP(13); - CASE_FIXED_FP(14); - CASE_FIXED_FP(15); - CASE_FIXED_FP(16); - CASE_FIXED_FP(17); - CASE_FIXED_FP(18); - CASE_FIXED_FP(19); - CASE_FIXED_FP(20); - CASE_FIXED_FP(21); - CASE_FIXED_FP(22); - CASE_FIXED_FP(23); - CASE_FIXED_FP(24); - CASE_FIXED_FP(25); - CASE_FIXED_FP(26); - CASE_FIXED_FP(27); - CASE_FIXED_FP(28); - CASE_FIXED_FP(29); - CASE_FIXED_FP(30); - CASE_FIXED_FP(31); - CASE_FIXED_FP(32); - CASE_FIXED_FP(33); - CASE_FIXED_FP(34); - CASE_FIXED_FP(35); - CASE_FIXED_FP(36); - CASE_FIXED_FP(37); - CASE_FIXED_FP(38); - CASE_FIXED_FP(39); - CASE_FIXED_FP(40); - CASE_FIXED_FP(41); - CASE_FIXED_FP(42); - CASE_FIXED_FP(43); - CASE_FIXED_FP(44); - CASE_FIXED_FP(45); - CASE_FIXED_FP(46); - CASE_FIXED_FP(47); - CASE_FIXED_FP(48); - CASE_FIXED_FP(49); - CASE_FIXED_FP(50); - CASE_FIXED_FP(51); - CASE_FIXED_FP(52); - CASE_FIXED_FP(53); - CASE_FIXED_FP(54); - CASE_FIXED_FP(55); - CASE_FIXED_FP(56); - CASE_FIXED_FP(57); - CASE_FIXED_FP(58); - CASE_FIXED_FP(59); - CASE_FIXED_FP(60); - CASE_FIXED_FP(61); - CASE_FIXED_FP(62); - CASE_FIXED_FP(63); - CASE_FIXED_FP(64); - CASE_FIXED_FP(65); - CASE_FIXED_FP(66); - CASE_FIXED_FP(67); - CASE_FIXED_FP(68); - CASE_FIXED_FP(69); - CASE_FIXED_FP(70); - CASE_FIXED_FP(71); - CASE_FIXED_FP(72); - CASE_FIXED_FP(73); - CASE_FIXED_FP(74); - CASE_FIXED_FP(75); - CASE_FIXED_FP(76); - CASE_FIXED_FP(77); - CASE_FIXED_FP(78); - CASE_FIXED_FP(79); - 
CASE_FIXED_FP(80); - CASE_FIXED_FP(81); - CASE_FIXED_FP(82); - CASE_FIXED_FP(83); - CASE_FIXED_FP(84); - CASE_FIXED_FP(85); - CASE_FIXED_FP(86); - CASE_FIXED_FP(87); - CASE_FIXED_FP(88); - CASE_FIXED_FP(89); - CASE_FIXED_FP(90); - CASE_FIXED_FP(91); - CASE_FIXED_FP(92); - CASE_FIXED_FP(93); - CASE_FIXED_FP(94); - CASE_FIXED_FP(95); - CASE_FIXED_FP(96); - CASE_FIXED_FP(97); - CASE_FIXED_FP(98); - CASE_FIXED_FP(99); - CASE_FIXED_FP(100); - CASE_FIXED_FP(101); - CASE_FIXED_FP(102); - CASE_FIXED_FP(103); - CASE_FIXED_FP(104); - CASE_FIXED_FP(105); - CASE_FIXED_FP(106); - CASE_FIXED_FP(107); - CASE_FIXED_FP(108); - CASE_FIXED_FP(109); - CASE_FIXED_FP(110); - CASE_FIXED_FP(111); - CASE_FIXED_FP(112); - CASE_FIXED_FP(113); - CASE_FIXED_FP(114); - CASE_FIXED_FP(115); - CASE_FIXED_FP(116); - CASE_FIXED_FP(117); - CASE_FIXED_FP(118); - CASE_FIXED_FP(119); - CASE_FIXED_FP(120); - CASE_FIXED_FP(121); - CASE_FIXED_FP(122); - CASE_FIXED_FP(123); - CASE_FIXED_FP(124); - CASE_FIXED_FP(125); - CASE_FIXED_FP(126); - CASE_FIXED_FP(127); - } -#undef CASE_FIXED_FP -} - -void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval, - struct kvm_pt_regs *regs) -{ - /* Take floating register rotation into consideration*/ - if (regnum >= IA64_FIRST_ROTATING_FR) - regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum); - -#define CASE_FIXED_FP(reg) \ - case (reg) : \ - ia64_ldf_fill(reg, fpval); \ - break - - switch (regnum) { - CASE_FIXED_FP(2); - CASE_FIXED_FP(3); - CASE_FIXED_FP(4); - CASE_FIXED_FP(5); - - CASE_FIXED_FP(6); - CASE_FIXED_FP(7); - CASE_FIXED_FP(8); - CASE_FIXED_FP(9); - CASE_FIXED_FP(10); - CASE_FIXED_FP(11); - - CASE_FIXED_FP(12); - CASE_FIXED_FP(13); - CASE_FIXED_FP(14); - CASE_FIXED_FP(15); - CASE_FIXED_FP(16); - CASE_FIXED_FP(17); - CASE_FIXED_FP(18); - CASE_FIXED_FP(19); - CASE_FIXED_FP(20); - CASE_FIXED_FP(21); - CASE_FIXED_FP(22); - CASE_FIXED_FP(23); - CASE_FIXED_FP(24); - CASE_FIXED_FP(25); - CASE_FIXED_FP(26); - CASE_FIXED_FP(27); - CASE_FIXED_FP(28); - CASE_FIXED_FP(29); - CASE_FIXED_FP(30); - CASE_FIXED_FP(31); - CASE_FIXED_FP(32); - CASE_FIXED_FP(33); - CASE_FIXED_FP(34); - CASE_FIXED_FP(35); - CASE_FIXED_FP(36); - CASE_FIXED_FP(37); - CASE_FIXED_FP(38); - CASE_FIXED_FP(39); - CASE_FIXED_FP(40); - CASE_FIXED_FP(41); - CASE_FIXED_FP(42); - CASE_FIXED_FP(43); - CASE_FIXED_FP(44); - CASE_FIXED_FP(45); - CASE_FIXED_FP(46); - CASE_FIXED_FP(47); - CASE_FIXED_FP(48); - CASE_FIXED_FP(49); - CASE_FIXED_FP(50); - CASE_FIXED_FP(51); - CASE_FIXED_FP(52); - CASE_FIXED_FP(53); - CASE_FIXED_FP(54); - CASE_FIXED_FP(55); - CASE_FIXED_FP(56); - CASE_FIXED_FP(57); - CASE_FIXED_FP(58); - CASE_FIXED_FP(59); - CASE_FIXED_FP(60); - CASE_FIXED_FP(61); - CASE_FIXED_FP(62); - CASE_FIXED_FP(63); - CASE_FIXED_FP(64); - CASE_FIXED_FP(65); - CASE_FIXED_FP(66); - CASE_FIXED_FP(67); - CASE_FIXED_FP(68); - CASE_FIXED_FP(69); - CASE_FIXED_FP(70); - CASE_FIXED_FP(71); - CASE_FIXED_FP(72); - CASE_FIXED_FP(73); - CASE_FIXED_FP(74); - CASE_FIXED_FP(75); - CASE_FIXED_FP(76); - CASE_FIXED_FP(77); - CASE_FIXED_FP(78); - CASE_FIXED_FP(79); - CASE_FIXED_FP(80); - CASE_FIXED_FP(81); - CASE_FIXED_FP(82); - CASE_FIXED_FP(83); - CASE_FIXED_FP(84); - CASE_FIXED_FP(85); - CASE_FIXED_FP(86); - CASE_FIXED_FP(87); - CASE_FIXED_FP(88); - CASE_FIXED_FP(89); - CASE_FIXED_FP(90); - CASE_FIXED_FP(91); - CASE_FIXED_FP(92); - CASE_FIXED_FP(93); - CASE_FIXED_FP(94); - CASE_FIXED_FP(95); - CASE_FIXED_FP(96); - CASE_FIXED_FP(97); - CASE_FIXED_FP(98); - CASE_FIXED_FP(99); - CASE_FIXED_FP(100); - CASE_FIXED_FP(101); - CASE_FIXED_FP(102); - 
CASE_FIXED_FP(103); - CASE_FIXED_FP(104); - CASE_FIXED_FP(105); - CASE_FIXED_FP(106); - CASE_FIXED_FP(107); - CASE_FIXED_FP(108); - CASE_FIXED_FP(109); - CASE_FIXED_FP(110); - CASE_FIXED_FP(111); - CASE_FIXED_FP(112); - CASE_FIXED_FP(113); - CASE_FIXED_FP(114); - CASE_FIXED_FP(115); - CASE_FIXED_FP(116); - CASE_FIXED_FP(117); - CASE_FIXED_FP(118); - CASE_FIXED_FP(119); - CASE_FIXED_FP(120); - CASE_FIXED_FP(121); - CASE_FIXED_FP(122); - CASE_FIXED_FP(123); - CASE_FIXED_FP(124); - CASE_FIXED_FP(125); - CASE_FIXED_FP(126); - CASE_FIXED_FP(127); - } -} - -void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, - struct ia64_fpreg *val) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - getfpreg(reg, val, regs); /* FIXME: handle NATs later*/ -} - -void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, - struct ia64_fpreg *val) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - if (reg > 1) - setfpreg(reg, val, regs); /* FIXME: handle NATs later*/ -} - -/* - * The Altix RTC is mapped specially here for the vmm module - */ -#define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<arch.is_sn2) - return (*SN_RTC_BASE); - else -#endif - return ia64_getreg(_IA64_REG_AR_ITC); -} - -/************************************************************************ - * lsapic timer - ***********************************************************************/ -u64 vcpu_get_itc(struct kvm_vcpu *vcpu) -{ - unsigned long guest_itc; - guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu); - - if (guest_itc >= VMX(vcpu, last_itc)) { - VMX(vcpu, last_itc) = guest_itc; - return guest_itc; - } else - return VMX(vcpu, last_itc); -} - -static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val); -static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) -{ - struct kvm_vcpu *v; - struct kvm *kvm; - int i; - long itc_offset = val - kvm_get_itc(vcpu); - unsigned long vitv = VCPU(vcpu, itv); - - kvm = (struct kvm *)KVM_VM_BASE; - - if (kvm_vcpu_is_bsp(vcpu)) { - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { - v = (struct kvm_vcpu *)((char *)vcpu + - sizeof(struct kvm_vcpu_data) * i); - VMX(v, itc_offset) = itc_offset; - VMX(v, last_itc) = 0; - } - } - VMX(vcpu, last_itc) = 0; - if (VCPU(vcpu, itm) <= val) { - VMX(vcpu, itc_check) = 0; - vcpu_unpend_interrupt(vcpu, vitv); - } else { - VMX(vcpu, itc_check) = 1; - vcpu_set_itm(vcpu, VCPU(vcpu, itm)); - } - -} - -static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, itm)); -} - -static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val) -{ - unsigned long vitv = VCPU(vcpu, itv); - VCPU(vcpu, itm) = val; - - if (val > vcpu_get_itc(vcpu)) { - VMX(vcpu, itc_check) = 1; - vcpu_unpend_interrupt(vcpu, vitv); - VMX(vcpu, timer_pending) = 0; - } else - VMX(vcpu, itc_check) = 0; -} - -#define ITV_VECTOR(itv) (itv&0xff) -#define ITV_IRQ_MASK(itv) (itv&(1<<16)) - -static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, itv) = val; - if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) { - vcpu_pend_interrupt(vcpu, ITV_VECTOR(val)); - vcpu->arch.timer_pending = 0; - } -} - -static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val) -{ - int vec; - - vec = highest_inservice_irq(vcpu); - if (vec == NULL_VECTOR) - return; - VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63)); - VCPU(vcpu, eoi) = 0; - vcpu->arch.irq_new_pending = 1; - -} - -/* See Table 5-8 in SDM vol2 for the definition */ -int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice) -{ - union ia64_tpr vtpr; - - vtpr.val = VCPU(vcpu, tpr); - 
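	/*
	 * Summary of the checks below, in the order they are coded:
	 *  - an in-service NMI masks every other pending interrupt;
	 *  - a pending NMI is otherwise never masked;
	 *  - an in-service ExtINT masks everything except a pending NMI;
	 *  - a pending ExtINT is masked only by vtpr.mmi;
	 *  - any other pending vector must both outrank the highest
	 *    in-service vector (is_higher_irq) and sit in a priority class
	 *    above the vtpr.mic threshold; vtpr.mmi feeds in as
	 *    mic + (mmi << 4), which exceeds every class and therefore
	 *    masks all ordinary external interrupts.
	 *  Judging from update_vhpi() further down, a vector's class is its
	 *  upper nibble (vec >> 4); on that reading vector 0x59 is class 5,
	 *  deliverable while mic < 5 and masked once mic reaches 5 (classes
	 *  at or below mic are disabled, per the TPR definition).
	 */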
- if (h_inservice == NMI_VECTOR) - return IRQ_MASKED_BY_INSVC; - - if (h_pending == NMI_VECTOR) { - /* Non Maskable Interrupt */ - return IRQ_NO_MASKED; - } - - if (h_inservice == ExtINT_VECTOR) - return IRQ_MASKED_BY_INSVC; - - if (h_pending == ExtINT_VECTOR) { - if (vtpr.mmi) { - /* mask all external IRQ */ - return IRQ_MASKED_BY_VTPR; - } else - return IRQ_NO_MASKED; - } - - if (is_higher_irq(h_pending, h_inservice)) { - if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4))) - return IRQ_NO_MASKED; - else - return IRQ_MASKED_BY_VTPR; - } else { - return IRQ_MASKED_BY_INSVC; - } -} - -void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec) -{ - long spsr; - int ret; - - local_irq_save(spsr); - ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0])); - local_irq_restore(spsr); - - vcpu->arch.irq_new_pending = 1; -} - -void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec) -{ - long spsr; - int ret; - - local_irq_save(spsr); - ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0])); - local_irq_restore(spsr); - if (ret) { - vcpu->arch.irq_new_pending = 1; - wmb(); - } -} - -void update_vhpi(struct kvm_vcpu *vcpu, int vec) -{ - u64 vhpi; - - if (vec == NULL_VECTOR) - vhpi = 0; - else if (vec == NMI_VECTOR) - vhpi = 32; - else if (vec == ExtINT_VECTOR) - vhpi = 16; - else - vhpi = vec >> 4; - - VCPU(vcpu, vhpi) = vhpi; - if (VCPU(vcpu, vac).a_int) - ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT, - (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0); -} - -u64 vcpu_get_ivr(struct kvm_vcpu *vcpu) -{ - int vec, h_inservice, mask; - - vec = highest_pending_irq(vcpu); - h_inservice = highest_inservice_irq(vcpu); - mask = irq_masked(vcpu, vec, h_inservice); - if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) { - if (VCPU(vcpu, vhpi)) - update_vhpi(vcpu, NULL_VECTOR); - return IA64_SPURIOUS_INT_VECTOR; - } - if (mask == IRQ_MASKED_BY_VTPR) { - update_vhpi(vcpu, vec); - return IA64_SPURIOUS_INT_VECTOR; - } - VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63)); - vcpu_unpend_interrupt(vcpu, vec); - return (u64)vec; -} - -/************************************************************************** - Privileged operation emulation routines - **************************************************************************/ -u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr) -{ - union ia64_pta vpta; - union ia64_rr vrr; - u64 pval; - u64 vhpt_offset; - - vpta.val = vcpu_get_pta(vcpu); - vrr.val = vcpu_get_rr(vcpu, vadr); - vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1); - if (vpta.vf) { - pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val, - vpta.val, 0, 0, 0, 0); - } else { - pval = (vadr & VRN_MASK) | vhpt_offset | - (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)); - } - return pval; -} - -u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr) -{ - union ia64_rr vrr; - union ia64_pta vpta; - u64 pval; - - vpta.val = vcpu_get_pta(vcpu); - vrr.val = vcpu_get_rr(vcpu, vadr); - if (vpta.vf) { - pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val, - 0, 0, 0, 0, 0); - } else - pval = 1; - - return pval; -} - -u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr) -{ - struct thash_data *data; - union ia64_pta vpta; - u64 key; - - vpta.val = vcpu_get_pta(vcpu); - if (vpta.vf == 0) { - key = 1; - return key; - } - data = vtlb_lookup(vcpu, vadr, D_TLB); - if (!data || !data->p) - key = 1; - else - key = data->key; - - return key; -} - -void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long thash, vadr; - - vadr = vcpu_get_gr(vcpu, inst.M46.r3); - thash = vcpu_thash(vcpu, vadr); - vcpu_set_gr(vcpu, inst.M46.r1, thash, 
0); -} - -void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long tag, vadr; - - vadr = vcpu_get_gr(vcpu, inst.M46.r3); - tag = vcpu_ttag(vcpu, vadr); - vcpu_set_gr(vcpu, inst.M46.r1, tag, 0); -} - -int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr) -{ - struct thash_data *data; - union ia64_isr visr, pt_isr; - struct kvm_pt_regs *regs; - struct ia64_psr vpsr; - - regs = vcpu_regs(vcpu); - pt_isr.val = VMX(vcpu, cr_isr); - visr.val = 0; - visr.ei = pt_isr.ei; - visr.ir = pt_isr.ir; - vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); - visr.na = 1; - - data = vhpt_lookup(vadr); - if (data) { - if (data->p == 0) { - vcpu_set_isr(vcpu, visr.val); - data_page_not_present(vcpu, vadr); - return IA64_FAULT; - } else if (data->ma == VA_MATTR_NATPAGE) { - vcpu_set_isr(vcpu, visr.val); - dnat_page_consumption(vcpu, vadr); - return IA64_FAULT; - } else { - *padr = (data->gpaddr >> data->ps << data->ps) | - (vadr & (PSIZE(data->ps) - 1)); - return IA64_NO_FAULT; - } - } - - data = vtlb_lookup(vcpu, vadr, D_TLB); - if (data) { - if (data->p == 0) { - vcpu_set_isr(vcpu, visr.val); - data_page_not_present(vcpu, vadr); - return IA64_FAULT; - } else if (data->ma == VA_MATTR_NATPAGE) { - vcpu_set_isr(vcpu, visr.val); - dnat_page_consumption(vcpu, vadr); - return IA64_FAULT; - } else{ - *padr = ((data->ppn >> (data->ps - 12)) << data->ps) - | (vadr & (PSIZE(data->ps) - 1)); - return IA64_NO_FAULT; - } - } - if (!vhpt_enabled(vcpu, vadr, NA_REF)) { - if (vpsr.ic) { - vcpu_set_isr(vcpu, visr.val); - alt_dtlb(vcpu, vadr); - return IA64_FAULT; - } else { - nested_dtlb(vcpu); - return IA64_FAULT; - } - } else { - if (vpsr.ic) { - vcpu_set_isr(vcpu, visr.val); - dvhpt_fault(vcpu, vadr); - return IA64_FAULT; - } else{ - nested_dtlb(vcpu); - return IA64_FAULT; - } - } - - return IA64_NO_FAULT; -} - -int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r1, r3; - - r3 = vcpu_get_gr(vcpu, inst.M46.r3); - - if (vcpu_tpa(vcpu, r3, &r1)) - return IA64_FAULT; - - vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); - return(IA64_NO_FAULT); -} - -void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r1, r3; - - r3 = vcpu_get_gr(vcpu, inst.M46.r3); - r1 = vcpu_tak(vcpu, r3); - vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); -} - -/************************************ - * Insert/Purge translation register/cache - ************************************/ -void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa) -{ - thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB); -} - -void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa) -{ - thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB); -} - -void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) -{ - u64 ps, va, rid; - struct thash_data *p_itr; - - ps = itir_ps(itir); - va = PAGEALIGN(ifa, ps); - pte &= ~PAGE_FLAGS_RV_MASK; - rid = vcpu_get_rr(vcpu, ifa); - rid = rid & RR_RID_MASK; - p_itr = (struct thash_data *)&vcpu->arch.itrs[slot]; - vcpu_set_tr(p_itr, pte, itir, va, rid); - vcpu_quick_region_set(VMX(vcpu, itr_regions), va); -} - - -void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa) -{ - u64 gpfn; - u64 ps, va, rid; - struct thash_data *p_dtr; - - ps = itir_ps(itir); - va = PAGEALIGN(ifa, ps); - pte &= ~PAGE_FLAGS_RV_MASK; - - if (ps != _PAGE_SIZE_16M) - thash_purge_entries(vcpu, va, ps); - gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; - if (__gpfn_is_io(gpfn)) - pte |= VTLB_PTE_IO; - rid = vcpu_get_rr(vcpu, va); - rid = rid & RR_RID_MASK; - p_dtr = (struct thash_data 
*)&vcpu->arch.dtrs[slot]; - vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot], - pte, itir, va, rid); - vcpu_quick_region_set(VMX(vcpu, dtr_regions), va); -} - -void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) -{ - int index; - u64 va; - - va = PAGEALIGN(ifa, ps); - while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0) - vcpu->arch.dtrs[index].page_flags = 0; - - thash_purge_entries(vcpu, va, ps); -} - -void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps) -{ - int index; - u64 va; - - va = PAGEALIGN(ifa, ps); - while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0) - vcpu->arch.itrs[index].page_flags = 0; - - thash_purge_entries(vcpu, va, ps); -} - -void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps) -{ - va = PAGEALIGN(va, ps); - thash_purge_entries(vcpu, va, ps); -} - -void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va) -{ - thash_purge_all(vcpu); -} - -void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - long psr; - local_irq_save(psr); - p->exit_reason = EXIT_REASON_PTC_G; - - p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va); - p->u.ptc_g_data.vaddr = va; - p->u.ptc_g_data.ps = ps; - vmm_transition(vcpu); - /* Do Local Purge Here*/ - vcpu_ptc_l(vcpu, va, ps); - local_irq_restore(psr); -} - - -void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps) -{ - vcpu_ptc_ga(vcpu, va, ps); -} - -void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long ifa; - - ifa = vcpu_get_gr(vcpu, inst.M45.r3); - vcpu_ptc_e(vcpu, ifa); -} - -void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long ifa, itir; - - ifa = vcpu_get_gr(vcpu, inst.M45.r3); - itir = vcpu_get_gr(vcpu, inst.M45.r2); - vcpu_ptc_g(vcpu, ifa, itir_ps(itir)); -} - -void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long ifa, itir; - - ifa = vcpu_get_gr(vcpu, inst.M45.r3); - itir = vcpu_get_gr(vcpu, inst.M45.r2); - vcpu_ptc_ga(vcpu, ifa, itir_ps(itir)); -} - -void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long ifa, itir; - - ifa = vcpu_get_gr(vcpu, inst.M45.r3); - itir = vcpu_get_gr(vcpu, inst.M45.r2); - vcpu_ptc_l(vcpu, ifa, itir_ps(itir)); -} - -void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long ifa, itir; - - ifa = vcpu_get_gr(vcpu, inst.M45.r3); - itir = vcpu_get_gr(vcpu, inst.M45.r2); - vcpu_ptr_d(vcpu, ifa, itir_ps(itir)); -} - -void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long ifa, itir; - - ifa = vcpu_get_gr(vcpu, inst.M45.r3); - itir = vcpu_get_gr(vcpu, inst.M45.r2); - vcpu_ptr_i(vcpu, ifa, itir_ps(itir)); -} - -void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long itir, ifa, pte, slot; - - slot = vcpu_get_gr(vcpu, inst.M45.r3); - pte = vcpu_get_gr(vcpu, inst.M45.r2); - itir = vcpu_get_itir(vcpu); - ifa = vcpu_get_ifa(vcpu); - vcpu_itr_d(vcpu, slot, pte, itir, ifa); -} - - - -void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long itir, ifa, pte, slot; - - slot = vcpu_get_gr(vcpu, inst.M45.r3); - pte = vcpu_get_gr(vcpu, inst.M45.r2); - itir = vcpu_get_itir(vcpu); - ifa = vcpu_get_ifa(vcpu); - vcpu_itr_i(vcpu, slot, pte, itir, ifa); -} - -void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long itir, ifa, pte; - - itir = vcpu_get_itir(vcpu); - ifa = vcpu_get_ifa(vcpu); - pte = vcpu_get_gr(vcpu, inst.M45.r2); - vcpu_itc_d(vcpu, pte, itir, ifa); -} - -void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long itir, ifa, pte; - - itir = vcpu_get_itir(vcpu); - ifa = vcpu_get_ifa(vcpu); - pte = vcpu_get_gr(vcpu, 
inst.M45.r2); - vcpu_itc_i(vcpu, pte, itir, ifa); -} - -/************************************* - * Moves to semi-privileged registers - *************************************/ - -void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long imm; - - if (inst.M30.s) - imm = -inst.M30.imm; - else - imm = inst.M30.imm; - - vcpu_set_itc(vcpu, imm); -} - -void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r2; - - r2 = vcpu_get_gr(vcpu, inst.M29.r2); - vcpu_set_itc(vcpu, r2); -} - -void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r1; - - r1 = vcpu_get_itc(vcpu); - vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); -} - -/************************************************************************** - struct kvm_vcpu protection key register access routines - **************************************************************************/ - -unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg) -{ - return ((unsigned long)ia64_get_pkr(reg)); -} - -void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val) -{ - ia64_set_pkr(reg, val); -} - -/******************************** - * Moves to privileged registers - ********************************/ -unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg, - unsigned long val) -{ - union ia64_rr oldrr, newrr; - unsigned long rrval; - struct exit_ctl_data *p = &vcpu->arch.exit_data; - unsigned long psr; - - oldrr.val = vcpu_get_rr(vcpu, reg); - newrr.val = val; - vcpu->arch.vrr[reg >> VRN_SHIFT] = val; - - switch ((unsigned long)(reg >> VRN_SHIFT)) { - case VRN6: - vcpu->arch.vmm_rr = vrrtomrr(val); - local_irq_save(psr); - p->exit_reason = EXIT_REASON_SWITCH_RR6; - vmm_transition(vcpu); - local_irq_restore(psr); - break; - case VRN4: - rrval = vrrtomrr(val); - vcpu->arch.metaphysical_saved_rr4 = rrval; - if (!is_physical_mode(vcpu)) - ia64_set_rr(reg, rrval); - break; - case VRN0: - rrval = vrrtomrr(val); - vcpu->arch.metaphysical_saved_rr0 = rrval; - if (!is_physical_mode(vcpu)) - ia64_set_rr(reg, rrval); - break; - default: - ia64_set_rr(reg, vrrtomrr(val)); - break; - } - - return (IA64_NO_FAULT); -} - -void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r3, r2; - - r3 = vcpu_get_gr(vcpu, inst.M42.r3); - r2 = vcpu_get_gr(vcpu, inst.M42.r2); - vcpu_set_rr(vcpu, r3, r2); -} - -void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst) -{ -} - -void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst) -{ -} - -void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r3, r2; - - r3 = vcpu_get_gr(vcpu, inst.M42.r3); - r2 = vcpu_get_gr(vcpu, inst.M42.r2); - vcpu_set_pmc(vcpu, r3, r2); -} - -void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r3, r2; - - r3 = vcpu_get_gr(vcpu, inst.M42.r3); - r2 = vcpu_get_gr(vcpu, inst.M42.r2); - vcpu_set_pmd(vcpu, r3, r2); -} - -void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst) -{ - u64 r3, r2; - - r3 = vcpu_get_gr(vcpu, inst.M42.r3); - r2 = vcpu_get_gr(vcpu, inst.M42.r2); - vcpu_set_pkr(vcpu, r3, r2); -} - -void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r3, r1; - - r3 = vcpu_get_gr(vcpu, inst.M43.r3); - r1 = vcpu_get_rr(vcpu, r3); - vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); -} - -void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r3, r1; - - r3 = vcpu_get_gr(vcpu, inst.M43.r3); - r1 = vcpu_get_pkr(vcpu, r3); - vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); -} - -void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst) 
-{ - unsigned long r3, r1; - - r3 = vcpu_get_gr(vcpu, inst.M43.r3); - r1 = vcpu_get_dbr(vcpu, r3); - vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); -} - -void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r3, r1; - - r3 = vcpu_get_gr(vcpu, inst.M43.r3); - r1 = vcpu_get_ibr(vcpu, r3); - vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); -} - -void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r3, r1; - - r3 = vcpu_get_gr(vcpu, inst.M43.r3); - r1 = vcpu_get_pmc(vcpu, r3); - vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); -} - -unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg) -{ - /* FIXME: This could get called as a result of a rsvd-reg fault */ - if (reg > (ia64_get_cpuid(3) & 0xff)) - return 0; - else - return ia64_get_cpuid(reg); -} - -void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r3, r1; - - r3 = vcpu_get_gr(vcpu, inst.M43.r3); - r1 = vcpu_get_cpuid(vcpu, r3); - vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); -} - -void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val) -{ - VCPU(vcpu, tpr) = val; - vcpu->arch.irq_check = 1; -} - -unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long r2; - - r2 = vcpu_get_gr(vcpu, inst.M32.r2); - VCPU(vcpu, vcr[inst.M32.cr3]) = r2; - - switch (inst.M32.cr3) { - case 0: - vcpu_set_dcr(vcpu, r2); - break; - case 1: - vcpu_set_itm(vcpu, r2); - break; - case 66: - vcpu_set_tpr(vcpu, r2); - break; - case 67: - vcpu_set_eoi(vcpu, r2); - break; - default: - break; - } - - return 0; -} - -unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long tgt = inst.M33.r1; - unsigned long val; - - switch (inst.M33.cr3) { - case 65: - val = vcpu_get_ivr(vcpu); - vcpu_set_gr(vcpu, tgt, val, 0); - break; - - case 67: - vcpu_set_gr(vcpu, tgt, 0L, 0); - break; - default: - val = VCPU(vcpu, vcr[inst.M33.cr3]); - vcpu_set_gr(vcpu, tgt, val, 0); - break; - } - - return 0; -} - -void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) -{ - - unsigned long mask; - struct kvm_pt_regs *regs; - struct ia64_psr old_psr, new_psr; - - old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); - - regs = vcpu_regs(vcpu); - /* We only support guest as: - * vpsr.pk = 0 - * vpsr.is = 0 - * Otherwise panic - */ - if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) - panic_vm(vcpu, "Only support guests with vpsr.pk =0 " - "& vpsr.is=0\n"); - - /* - * For those IA64_PSR bits: id/da/dd/ss/ed/ia - * Since these bits will become 0, after success execution of each - * instruction, we will change set them to mIA64_PSR - */ - VCPU(vcpu, vpsr) = val - & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | - IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)); - - if (!old_psr.i && (val & IA64_PSR_I)) { - /* vpsr.i 0->1 */ - vcpu->arch.irq_check = 1; - } - new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); - - /* - * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr) - * , except for the following bits: - * ic/i/dt/si/rt/mc/it/bn/vm - */ - mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI + - IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN + - IA64_PSR_VM; - - regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask)); - - check_mm_mode_switch(vcpu, old_psr, new_psr); - - return ; -} - -unsigned long vcpu_cover(struct kvm_vcpu *vcpu) -{ - struct ia64_psr vpsr; - - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); - - if (!vpsr.ic) - VCPU(vcpu, ifs) = regs->cr_ifs; - regs->cr_ifs = IA64_IFS_V; - return (IA64_NO_FAULT); -} - - - 
-/************************************************************************** - VCPU banked general register access routines - **************************************************************************/ -#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \ - do { \ - __asm__ __volatile__ ( \ - ";;extr.u %0 = %3,%6,16;;\n" \ - "dep %1 = %0, %1, 0, 16;;\n" \ - "st8 [%4] = %1\n" \ - "extr.u %0 = %2, 16, 16;;\n" \ - "dep %3 = %0, %3, %6, 16;;\n" \ - "st8 [%5] = %3\n" \ - ::"r"(i), "r"(*b1unat), "r"(*b0unat), \ - "r"(*runat), "r"(b1unat), "r"(runat), \ - "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \ - } while (0) - -void vcpu_bsw0(struct kvm_vcpu *vcpu) -{ - unsigned long i; - - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - unsigned long *r = ®s->r16; - unsigned long *b0 = &VCPU(vcpu, vbgr[0]); - unsigned long *b1 = &VCPU(vcpu, vgr[0]); - unsigned long *runat = ®s->eml_unat; - unsigned long *b0unat = &VCPU(vcpu, vbnat); - unsigned long *b1unat = &VCPU(vcpu, vnat); - - - if (VCPU(vcpu, vpsr) & IA64_PSR_BN) { - for (i = 0; i < 16; i++) { - *b1++ = *r; - *r++ = *b0++; - } - vcpu_bsw0_unat(i, b0unat, b1unat, runat, - VMM_PT_REGS_R16_SLOT); - VCPU(vcpu, vpsr) &= ~IA64_PSR_BN; - } -} - -#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \ - do { \ - __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \ - "dep %1 = %0, %1, 16, 16;;\n" \ - "st8 [%4] = %1\n" \ - "extr.u %0 = %2, 0, 16;;\n" \ - "dep %3 = %0, %3, %6, 16;;\n" \ - "st8 [%5] = %3\n" \ - ::"r"(i), "r"(*b0unat), "r"(*b1unat), \ - "r"(*runat), "r"(b0unat), "r"(runat), \ - "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \ - } while (0) - -void vcpu_bsw1(struct kvm_vcpu *vcpu) -{ - unsigned long i; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - unsigned long *r = ®s->r16; - unsigned long *b0 = &VCPU(vcpu, vbgr[0]); - unsigned long *b1 = &VCPU(vcpu, vgr[0]); - unsigned long *runat = ®s->eml_unat; - unsigned long *b0unat = &VCPU(vcpu, vbnat); - unsigned long *b1unat = &VCPU(vcpu, vnat); - - if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) { - for (i = 0; i < 16; i++) { - *b0++ = *r; - *r++ = *b1++; - } - vcpu_bsw1_unat(i, b0unat, b1unat, runat, - VMM_PT_REGS_R16_SLOT); - VCPU(vcpu, vpsr) |= IA64_PSR_BN; - } -} - -void vcpu_rfi(struct kvm_vcpu *vcpu) -{ - unsigned long ifs, psr; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - psr = VCPU(vcpu, ipsr); - if (psr & IA64_PSR_BN) - vcpu_bsw1(vcpu); - else - vcpu_bsw0(vcpu); - vcpu_set_psr(vcpu, psr); - ifs = VCPU(vcpu, ifs); - if (ifs >> 63) - regs->cr_ifs = ifs; - regs->cr_iip = VCPU(vcpu, iip); -} - -/* - VPSR can't keep track of below bits of guest PSR - This function gets guest PSR - */ - -unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu) -{ - unsigned long mask; - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - - mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | - IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI; - return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask); -} - -void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long vpsr; - unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21) - | inst.M44.imm; - - vpsr = vcpu_get_psr(vcpu); - vpsr &= (~imm24); - vcpu_set_psr(vcpu, vpsr); -} - -void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long vpsr; - unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) - | inst.M44.imm; - - vpsr = vcpu_get_psr(vcpu); - vpsr |= imm24; - vcpu_set_psr(vcpu, vpsr); -} - -/* Generate Mask - * Parameter: - * bit -- starting bit - * len -- how many bits - */ -#define MASK(bit,len) \ -({ \ - __u64 ret; \ 
- \ - __asm __volatile("dep %0=-1, r0, %1, %2"\ - : "=r" (ret): \ - "M" (bit), \ - "M" (len)); \ - ret; \ -}) - -void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val) -{ - val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32)); - vcpu_set_psr(vcpu, val); -} - -void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long val; - - val = vcpu_get_gr(vcpu, inst.M35.r2); - vcpu_set_psr_l(vcpu, val); -} - -void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst) -{ - unsigned long val; - - val = vcpu_get_psr(vcpu); - val = (val & MASK(0, 32)) | (val & MASK(35, 2)); - vcpu_set_gr(vcpu, inst.M33.r1, val, 0); -} - -void vcpu_increment_iip(struct kvm_vcpu *vcpu) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - struct ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; - if (ipsr->ri == 2) { - ipsr->ri = 0; - regs->cr_iip += 16; - } else - ipsr->ri++; -} - -void vcpu_decrement_iip(struct kvm_vcpu *vcpu) -{ - struct kvm_pt_regs *regs = vcpu_regs(vcpu); - struct ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; - - if (ipsr->ri == 0) { - ipsr->ri = 2; - regs->cr_iip -= 16; - } else - ipsr->ri--; -} - -/** Emulate a privileged operation. - * - * - * @param vcpu virtual cpu - * @cause the reason cause virtualization fault - * @opcode the instruction code which cause virtualization fault - */ - -void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs) -{ - unsigned long status, cause, opcode ; - INST64 inst; - - status = IA64_NO_FAULT; - cause = VMX(vcpu, cause); - opcode = VMX(vcpu, opcode); - inst.inst = opcode; - /* - * Switch to actual virtual rid in rr0 and rr4, - * which is required by some tlb related instructions. - */ - prepare_if_physical_mode(vcpu); - - switch (cause) { - case EVENT_RSM: - kvm_rsm(vcpu, inst); - break; - case EVENT_SSM: - kvm_ssm(vcpu, inst); - break; - case EVENT_MOV_TO_PSR: - kvm_mov_to_psr(vcpu, inst); - break; - case EVENT_MOV_FROM_PSR: - kvm_mov_from_psr(vcpu, inst); - break; - case EVENT_MOV_FROM_CR: - kvm_mov_from_cr(vcpu, inst); - break; - case EVENT_MOV_TO_CR: - kvm_mov_to_cr(vcpu, inst); - break; - case EVENT_BSW_0: - vcpu_bsw0(vcpu); - break; - case EVENT_BSW_1: - vcpu_bsw1(vcpu); - break; - case EVENT_COVER: - vcpu_cover(vcpu); - break; - case EVENT_RFI: - vcpu_rfi(vcpu); - break; - case EVENT_ITR_D: - kvm_itr_d(vcpu, inst); - break; - case EVENT_ITR_I: - kvm_itr_i(vcpu, inst); - break; - case EVENT_PTR_D: - kvm_ptr_d(vcpu, inst); - break; - case EVENT_PTR_I: - kvm_ptr_i(vcpu, inst); - break; - case EVENT_ITC_D: - kvm_itc_d(vcpu, inst); - break; - case EVENT_ITC_I: - kvm_itc_i(vcpu, inst); - break; - case EVENT_PTC_L: - kvm_ptc_l(vcpu, inst); - break; - case EVENT_PTC_G: - kvm_ptc_g(vcpu, inst); - break; - case EVENT_PTC_GA: - kvm_ptc_ga(vcpu, inst); - break; - case EVENT_PTC_E: - kvm_ptc_e(vcpu, inst); - break; - case EVENT_MOV_TO_RR: - kvm_mov_to_rr(vcpu, inst); - break; - case EVENT_MOV_FROM_RR: - kvm_mov_from_rr(vcpu, inst); - break; - case EVENT_THASH: - kvm_thash(vcpu, inst); - break; - case EVENT_TTAG: - kvm_ttag(vcpu, inst); - break; - case EVENT_TPA: - status = kvm_tpa(vcpu, inst); - break; - case EVENT_TAK: - kvm_tak(vcpu, inst); - break; - case EVENT_MOV_TO_AR_IMM: - kvm_mov_to_ar_imm(vcpu, inst); - break; - case EVENT_MOV_TO_AR: - kvm_mov_to_ar_reg(vcpu, inst); - break; - case EVENT_MOV_FROM_AR: - kvm_mov_from_ar_reg(vcpu, inst); - break; - case EVENT_MOV_TO_DBR: - kvm_mov_to_dbr(vcpu, inst); - break; - case EVENT_MOV_TO_IBR: - kvm_mov_to_ibr(vcpu, inst); - break; - case EVENT_MOV_TO_PMC: - kvm_mov_to_pmc(vcpu, 
inst); - break; - case EVENT_MOV_TO_PMD: - kvm_mov_to_pmd(vcpu, inst); - break; - case EVENT_MOV_TO_PKR: - kvm_mov_to_pkr(vcpu, inst); - break; - case EVENT_MOV_FROM_DBR: - kvm_mov_from_dbr(vcpu, inst); - break; - case EVENT_MOV_FROM_IBR: - kvm_mov_from_ibr(vcpu, inst); - break; - case EVENT_MOV_FROM_PMC: - kvm_mov_from_pmc(vcpu, inst); - break; - case EVENT_MOV_FROM_PKR: - kvm_mov_from_pkr(vcpu, inst); - break; - case EVENT_MOV_FROM_CPUID: - kvm_mov_from_cpuid(vcpu, inst); - break; - case EVENT_VMSW: - status = IA64_FAULT; - break; - default: - break; - }; - /*Assume all status is NO_FAULT ?*/ - if (status == IA64_NO_FAULT && cause != EVENT_RFI) - vcpu_increment_iip(vcpu); - - recover_if_physical_mode(vcpu); -} - -void init_vcpu(struct kvm_vcpu *vcpu) -{ - int i; - - vcpu->arch.mode_flags = GUEST_IN_PHY; - VMX(vcpu, vrr[0]) = 0x38; - VMX(vcpu, vrr[1]) = 0x38; - VMX(vcpu, vrr[2]) = 0x38; - VMX(vcpu, vrr[3]) = 0x38; - VMX(vcpu, vrr[4]) = 0x38; - VMX(vcpu, vrr[5]) = 0x38; - VMX(vcpu, vrr[6]) = 0x38; - VMX(vcpu, vrr[7]) = 0x38; - VCPU(vcpu, vpsr) = IA64_PSR_BN; - VCPU(vcpu, dcr) = 0; - /* pta.size must not be 0. The minimum is 15 (32k) */ - VCPU(vcpu, pta) = 15 << 2; - VCPU(vcpu, itv) = 0x10000; - VCPU(vcpu, itm) = 0; - VMX(vcpu, last_itc) = 0; - - VCPU(vcpu, lid) = VCPU_LID(vcpu); - VCPU(vcpu, ivr) = 0; - VCPU(vcpu, tpr) = 0x10000; - VCPU(vcpu, eoi) = 0; - VCPU(vcpu, irr[0]) = 0; - VCPU(vcpu, irr[1]) = 0; - VCPU(vcpu, irr[2]) = 0; - VCPU(vcpu, irr[3]) = 0; - VCPU(vcpu, pmv) = 0x10000; - VCPU(vcpu, cmcv) = 0x10000; - VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */ - VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */ - update_vhpi(vcpu, NULL_VECTOR); - VLSAPIC_XTP(vcpu) = 0x80; /* disabled */ - - for (i = 0; i < 4; i++) - VLSAPIC_INSVC(vcpu, i) = 0; -} - -void kvm_init_all_rr(struct kvm_vcpu *vcpu) -{ - unsigned long psr; - - local_irq_save(psr); - - /* WARNING: not allow co-exist of both virtual mode and physical - * mode in same region - */ - - vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0])); - vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4])); - - if (is_physical_mode(vcpu)) { - if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) - panic_vm(vcpu, "Machine Status conflicts!\n"); - - ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); - ia64_dv_serialize_data(); - ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4); - ia64_dv_serialize_data(); - } else { - ia64_set_rr((VRN0 << VRN_SHIFT), - vcpu->arch.metaphysical_saved_rr0); - ia64_dv_serialize_data(); - ia64_set_rr((VRN4 << VRN_SHIFT), - vcpu->arch.metaphysical_saved_rr4); - ia64_dv_serialize_data(); - } - ia64_set_rr((VRN1 << VRN_SHIFT), - vrrtomrr(VMX(vcpu, vrr[VRN1]))); - ia64_dv_serialize_data(); - ia64_set_rr((VRN2 << VRN_SHIFT), - vrrtomrr(VMX(vcpu, vrr[VRN2]))); - ia64_dv_serialize_data(); - ia64_set_rr((VRN3 << VRN_SHIFT), - vrrtomrr(VMX(vcpu, vrr[VRN3]))); - ia64_dv_serialize_data(); - ia64_set_rr((VRN5 << VRN_SHIFT), - vrrtomrr(VMX(vcpu, vrr[VRN5]))); - ia64_dv_serialize_data(); - ia64_set_rr((VRN7 << VRN_SHIFT), - vrrtomrr(VMX(vcpu, vrr[VRN7]))); - ia64_dv_serialize_data(); - ia64_srlz_d(); - ia64_set_psr(psr); -} - -int vmm_entry(void) -{ - struct kvm_vcpu *v; - v = current_vcpu; - - ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd, - 0, 0, 0, 0, 0, 0); - kvm_init_vtlb(v); - kvm_init_vhpt(v); - init_vcpu(v); - kvm_init_all_rr(v); - vmm_reset_entry(); - - return 0; -} - -static void kvm_show_registers(struct kvm_pt_regs *regs) -{ - unsigned long ip = 
regs->cr_iip + ia64_psr(regs)->ri; - - struct kvm_vcpu *vcpu = current_vcpu; - if (vcpu != NULL) - printk("vcpu 0x%p vcpu %d\n", - vcpu, vcpu->vcpu_id); - - printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n", - regs->cr_ipsr, regs->cr_ifs, ip); - - printk("unat: %016lx pfs : %016lx rsc : %016lx\n", - regs->ar_unat, regs->ar_pfs, regs->ar_rsc); - printk("rnat: %016lx bspstore: %016lx pr : %016lx\n", - regs->ar_rnat, regs->ar_bspstore, regs->pr); - printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", - regs->loadrs, regs->ar_ccv, regs->ar_fpsr); - printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); - printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, - regs->b6, regs->b7); - printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", - regs->f6.u.bits[1], regs->f6.u.bits[0], - regs->f7.u.bits[1], regs->f7.u.bits[0]); - printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", - regs->f8.u.bits[1], regs->f8.u.bits[0], - regs->f9.u.bits[1], regs->f9.u.bits[0]); - printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", - regs->f10.u.bits[1], regs->f10.u.bits[0], - regs->f11.u.bits[1], regs->f11.u.bits[0]); - - printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, - regs->r2, regs->r3); - printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, - regs->r9, regs->r10); - printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, - regs->r12, regs->r13); - printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, - regs->r15, regs->r16); - printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, - regs->r18, regs->r19); - printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, - regs->r21, regs->r22); - printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, - regs->r24, regs->r25); - printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, - regs->r27, regs->r28); - printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, - regs->r30, regs->r31); - -} - -void panic_vm(struct kvm_vcpu *v, const char *fmt, ...) -{ - va_list args; - char buf[256]; - - struct kvm_pt_regs *regs = vcpu_regs(v); - struct exit_ctl_data *p = &v->arch.exit_data; - va_start(args, fmt); - vsnprintf(buf, sizeof(buf), fmt, args); - va_end(args); - printk(buf); - kvm_show_registers(regs); - p->exit_reason = EXIT_REASON_VM_PANIC; - vmm_transition(v); - /*Never to return*/ - while (1); -} diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h deleted file mode 100644 index 988911b4cc7a..000000000000 --- a/arch/ia64/kvm/vcpu.h +++ /dev/null @@ -1,752 +0,0 @@ -/* - * vcpu.h: vcpu routines - * Copyright (c) 2005, Intel Corporation. - * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) - * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com) - * - * Copyright (c) 2007, Intel Corporation. - * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com) - * Xiantao Zhang (xiantao.zhang@intel.com) - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
- * - */ - - -#ifndef __KVM_VCPU_H__ -#define __KVM_VCPU_H__ - -#include -#include -#include - -#ifndef __ASSEMBLY__ -#include "vti.h" - -#include -#include - -typedef unsigned long IA64_INST; - -typedef union U_IA64_BUNDLE { - unsigned long i64[2]; - struct { unsigned long template:5, slot0:41, slot1a:18, - slot1b:23, slot2:41; }; - /* NOTE: following doesn't work because bitfields can't cross natural - size boundaries - struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */ -} IA64_BUNDLE; - -typedef union U_INST64_A5 { - IA64_INST inst; - struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5, - imm9d:9, s:1, major:4; }; -} INST64_A5; - -typedef union U_INST64_B4 { - IA64_INST inst; - struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6, - wh:2, d:1, un1:1, major:4; }; -} INST64_B4; - -typedef union U_INST64_B8 { - IA64_INST inst; - struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; }; -} INST64_B8; - -typedef union U_INST64_B9 { - IA64_INST inst; - struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; }; -} INST64_B9; - -typedef union U_INST64_I19 { - IA64_INST inst; - struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; }; -} INST64_I19; - -typedef union U_INST64_I26 { - IA64_INST inst; - struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; }; -} INST64_I26; - -typedef union U_INST64_I27 { - IA64_INST inst; - struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; }; -} INST64_I27; - -typedef union U_INST64_I28 { /* not privileged (mov from AR) */ - IA64_INST inst; - struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; }; -} INST64_I28; - -typedef union U_INST64_M28 { - IA64_INST inst; - struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; }; -} INST64_M28; - -typedef union U_INST64_M29 { - IA64_INST inst; - struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; }; -} INST64_M29; - -typedef union U_INST64_M30 { - IA64_INST inst; - struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2, - x3:3, s:1, major:4; }; -} INST64_M30; - -typedef union U_INST64_M31 { - IA64_INST inst; - struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; }; -} INST64_M31; - -typedef union U_INST64_M32 { - IA64_INST inst; - struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; }; -} INST64_M32; - -typedef union U_INST64_M33 { - IA64_INST inst; - struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; }; -} INST64_M33; - -typedef union U_INST64_M35 { - IA64_INST inst; - struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; }; - -} INST64_M35; - -typedef union U_INST64_M36 { - IA64_INST inst; - struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; }; -} INST64_M36; - -typedef union U_INST64_M37 { - IA64_INST inst; - struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3, - i:1, major:4; }; -} INST64_M37; - -typedef union U_INST64_M41 { - IA64_INST inst; - struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; }; -} INST64_M41; - -typedef union U_INST64_M42 { - IA64_INST inst; - struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; }; -} INST64_M42; - -typedef union U_INST64_M43 { - IA64_INST inst; - struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; }; -} INST64_M43; - -typedef union U_INST64_M44 { - IA64_INST inst; - struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; }; -} INST64_M44; - -typedef union U_INST64_M45 
{ - IA64_INST inst; - struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; }; -} INST64_M45; - -typedef union U_INST64_M46 { - IA64_INST inst; - struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6, - x3:3, un1:1, major:4; }; -} INST64_M46; - -typedef union U_INST64_M47 { - IA64_INST inst; - struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; }; -} INST64_M47; - -typedef union U_INST64_M1{ - IA64_INST inst; - struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2, - x6:6, m:1, major:4; }; -} INST64_M1; - -typedef union U_INST64_M2{ - IA64_INST inst; - struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2, - x6:6, m:1, major:4; }; -} INST64_M2; - -typedef union U_INST64_M3{ - IA64_INST inst; - struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2, - x6:6, s:1, major:4; }; -} INST64_M3; - -typedef union U_INST64_M4 { - IA64_INST inst; - struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2, - x6:6, m:1, major:4; }; -} INST64_M4; - -typedef union U_INST64_M5 { - IA64_INST inst; - struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2, - x6:6, s:1, major:4; }; -} INST64_M5; - -typedef union U_INST64_M6 { - IA64_INST inst; - struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, - x6:6, m:1, major:4; }; -} INST64_M6; - -typedef union U_INST64_M9 { - IA64_INST inst; - struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2, - x6:6, m:1, major:4; }; -} INST64_M9; - -typedef union U_INST64_M10 { - IA64_INST inst; - struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2, - x6:6, s:1, major:4; }; -} INST64_M10; - -typedef union U_INST64_M12 { - IA64_INST inst; - struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2, - x6:6, m:1, major:4; }; -} INST64_M12; - -typedef union U_INST64_M15 { - IA64_INST inst; - struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2, - x6:6, s:1, major:4; }; -} INST64_M15; - -typedef union U_INST64 { - IA64_INST inst; - struct { unsigned long :37, major:4; } generic; - INST64_A5 A5; /* used in build_hypercall_bundle only */ - INST64_B4 B4; /* used in build_hypercall_bundle only */ - INST64_B8 B8; /* rfi, bsw.[01] */ - INST64_B9 B9; /* break.b */ - INST64_I19 I19; /* used in build_hypercall_bundle only */ - INST64_I26 I26; /* mov register to ar (I unit) */ - INST64_I27 I27; /* mov immediate to ar (I unit) */ - INST64_I28 I28; /* mov from ar (I unit) */ - INST64_M1 M1; /* ld integer */ - INST64_M2 M2; - INST64_M3 M3; - INST64_M4 M4; /* st integer */ - INST64_M5 M5; - INST64_M6 M6; /* ldfd floating pointer */ - INST64_M9 M9; /* stfd floating pointer */ - INST64_M10 M10; /* stfd floating pointer */ - INST64_M12 M12; /* ldfd pair floating pointer */ - INST64_M15 M15; /* lfetch + imm update */ - INST64_M28 M28; /* purge translation cache entry */ - INST64_M29 M29; /* mov register to ar (M unit) */ - INST64_M30 M30; /* mov immediate to ar (M unit) */ - INST64_M31 M31; /* mov from ar (M unit) */ - INST64_M32 M32; /* mov reg to cr */ - INST64_M33 M33; /* mov from cr */ - INST64_M35 M35; /* mov to psr */ - INST64_M36 M36; /* mov from psr */ - INST64_M37 M37; /* break.m */ - INST64_M41 M41; /* translation cache insert */ - INST64_M42 M42; /* mov to indirect reg/translation reg insert*/ - INST64_M43 M43; /* mov from indirect reg */ - INST64_M44 M44; /* set/reset system mask */ - INST64_M45 M45; /* translation purge */ - INST64_M46 M46; /* translation access (tpa,tak) */ - INST64_M47 M47; /* purge translation entry */ -} INST64; - -#define MASK_41 ((unsigned long)0x1ffffffffff) - -/* Virtual 
address memory attributes encoding */ -#define VA_MATTR_WB 0x0 -#define VA_MATTR_UC 0x4 -#define VA_MATTR_UCE 0x5 -#define VA_MATTR_WC 0x6 -#define VA_MATTR_NATPAGE 0x7 - -#define PMASK(size) (~((size) - 1)) -#define PSIZE(size) (1UL<<(size)) -#define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits)) -#define PAGEALIGN(va, ps) CLEARLSB(va, ps) -#define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53)) -#define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */ - -#define ARCH_PAGE_SHIFT 12 - -#define INVALID_TI_TAG (1UL << 63) - -#define VTLB_PTE_P_BIT 0 -#define VTLB_PTE_IO_BIT 60 -#define VTLB_PTE_IO (1UL<> 61))) - -#define vcpu_quick_region_set(_tr_regions,_ifa) \ - do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0) - -static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir, - u64 va, u64 rid) -{ - trp->page_flags = pte; - trp->itir = itir; - trp->vadr = va; - trp->rid = rid; -} - -extern u64 kvm_get_mpt_entry(u64 gpfn); - -/* Return I/ */ -static inline u64 __gpfn_is_io(u64 gpfn) -{ - u64 pte; - pte = kvm_get_mpt_entry(gpfn); - if (!(pte & GPFN_INV_MASK)) { - pte = pte & GPFN_IO_MASK; - if (pte != GPFN_PHYS_MMIO) - return pte; - } - return 0; -} -#endif -#define IA64_NO_FAULT 0 -#define IA64_FAULT 1 - -#define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15) - -#define SW_BAD 0 /* Bad mode transitition */ -#define SW_V2P 1 /* Physical emulatino is activated */ -#define SW_P2V 2 /* Exit physical mode emulation */ -#define SW_SELF 3 /* No mode transition */ -#define SW_NOP 4 /* Mode transition, but without action required */ - -#define GUEST_IN_PHY 0x1 -#define GUEST_PHY_EMUL 0x2 - -#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP)) - -#define VRN_SHIFT 61 -#define VRN_MASK 0xe000000000000000 -#define VRN0 0x0UL -#define VRN1 0x1UL -#define VRN2 0x2UL -#define VRN3 0x3UL -#define VRN4 0x4UL -#define VRN5 0x5UL -#define VRN6 0x6UL -#define VRN7 0x7UL - -#define IRQ_NO_MASKED 0 -#define IRQ_MASKED_BY_VTPR 1 -#define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */ - -#define PTA_BASE_SHIFT 15 - -#define IA64_PSR_VM_BIT 46 -#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT) - -/* Interruption Function State */ -#define IA64_IFS_V_BIT 63 -#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT) - -#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX) -#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX) - -#ifndef __ASSEMBLY__ - -#include - -#define is_physical_mode(v) \ - ((v->arch.mode_flags) & GUEST_IN_PHY) - -#define is_virtual_mode(v) \ - (!is_physical_mode(v)) - -#define MODE_IND(psr) \ - (((psr).it << 2) + ((psr).dt << 1) + (psr).rt) - -#ifndef CONFIG_SMP -#define _vmm_raw_spin_lock(x) do {}while(0) -#define _vmm_raw_spin_unlock(x) do {}while(0) -#else -typedef struct { - volatile unsigned int lock; -} vmm_spinlock_t; -#define _vmm_raw_spin_lock(x) \ - do { \ - __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ - __u64 ia64_spinlock_val; \ - ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\ - if (unlikely(ia64_spinlock_val)) { \ - do { \ - while (*ia64_spinlock_ptr) \ - ia64_barrier(); \ - ia64_spinlock_val = \ - ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\ - } while (ia64_spinlock_val); \ - } \ - } while (0) - -#define _vmm_raw_spin_unlock(x) \ - do { barrier(); \ - ((vmm_spinlock_t *)x)->lock = 0; } \ -while (0) -#endif - -void vmm_spin_lock(vmm_spinlock_t *lock); -void vmm_spin_unlock(vmm_spinlock_t *lock); -enum { - I_TLB = 1, - D_TLB = 2 -}; - -union kvm_va { - struct { - 
unsigned long off : 60; /* intra-region offset */ - unsigned long reg : 4; /* region number */ - } f; - unsigned long l; - void *p; -}; - -#define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \ - _v.f.reg = 0; _v.l; }) -#define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \ - _v.f.reg = -1; _v.p; }) - -#define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \ - _v.rid; }) -#define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \ - _v.ps; }) -#define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \ - _v.ve; }) - -enum vhpt_ref{ DATA_REF, NA_REF, INST_REF, RSE_REF }; -enum tlb_miss_type { INSTRUCTION, DATA, REGISTER }; - -#define VCPU(_v, _x) ((_v)->arch.vpd->_x) -#define VMX(_v, _x) ((_v)->arch._x) - -#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i]) -#define VLSAPIC_XTP(_v) VMX(_v, xtp) - -static inline unsigned long itir_ps(unsigned long itir) -{ - return ((itir >> 2) & 0x3f); -} - - -/************************************************************************** - VCPU control register access routines - **************************************************************************/ - -static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, itir)); -} - -static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, itir) = val; -} - -static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, ifa)); -} - -static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, ifa) = val; -} - -static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, iva)); -} - -static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, pta)); -} - -static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, lid)); -} - -static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, tpr)); -} - -static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu) -{ - return (0UL); /*reads of eoi always return 0 */ -} - -static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, irr[0])); -} - -static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, irr[1])); -} - -static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, irr[2])); -} - -static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu) -{ - return ((u64)VCPU(vcpu, irr[3])); -} - -static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val) -{ - ia64_setreg(_IA64_REG_CR_DCR, val); -} - -static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, isr) = val; -} - -static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, lid) = val; -} - -static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, ipsr) = val; -} - -static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, iip) = val; -} - -static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, ifs) = val; -} - -static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, iipa) = val; -} - -static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val) -{ - VCPU(vcpu, iha) = val; -} - - -static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg) -{ - return vcpu->arch.vrr[reg>>61]; -} - -/************************************************************************** - VCPU debug breakpoint register access routines - **************************************************************************/ - -static 
inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val) -{ - __ia64_set_dbr(reg, val); -} - -static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val) -{ - ia64_set_ibr(reg, val); -} - -static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg) -{ - return ((u64)__ia64_get_dbr(reg)); -} - -static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg) -{ - return ((u64)ia64_get_ibr(reg)); -} - -/************************************************************************** - VCPU performance monitor register access routines - **************************************************************************/ -static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val) -{ - /* NOTE: Writes to unimplemented PMC registers are discarded */ - ia64_set_pmc(reg, val); -} - -static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val) -{ - /* NOTE: Writes to unimplemented PMD registers are discarded */ - ia64_set_pmd(reg, val); -} - -static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg) -{ - /* NOTE: Reads from unimplemented PMC registers return zero */ - return ((u64)ia64_get_pmc(reg)); -} - -static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg) -{ - /* NOTE: Reads from unimplemented PMD registers return zero */ - return ((u64)ia64_get_pmd(reg)); -} - -static inline unsigned long vrrtomrr(unsigned long val) -{ - union ia64_rr rr; - rr.val = val; - rr.rid = (rr.rid << 4) | 0xe; - if (rr.ps > PAGE_SHIFT) - rr.ps = PAGE_SHIFT; - rr.ve = 1; - return rr.val; -} - - -static inline int highest_bits(int *dat) -{ - u32 bits, bitnum; - int i; - - /* loop for all 256 bits */ - for (i = 7; i >= 0 ; i--) { - bits = dat[i]; - if (bits) { - bitnum = fls(bits); - return i * 32 + bitnum - 1; - } - } - return NULL_VECTOR; -} - -/* - * The pending irq is higher than the inservice one. - * - */ -static inline int is_higher_irq(int pending, int inservice) -{ - return ((pending > inservice) - || ((pending != NULL_VECTOR) - && (inservice == NULL_VECTOR))); -} - -static inline int is_higher_class(int pending, int mic) -{ - return ((pending >> 4) > mic); -} - -/* - * Return 0-255 for pending irq. - * NULL_VECTOR: when no pending. - */ -static inline int highest_pending_irq(struct kvm_vcpu *vcpu) -{ - if (VCPU(vcpu, irr[0]) & (1UL< -#include -#include - -#include "vcpu.h" - -MODULE_AUTHOR("Intel"); -MODULE_LICENSE("GPL"); - -extern char kvm_ia64_ivt; -extern char kvm_asm_mov_from_ar; -extern char kvm_asm_mov_from_ar_sn2; -extern fpswa_interface_t *vmm_fpswa_interface; - -long vmm_sanity = 1; - -struct kvm_vmm_info vmm_info = { - .module = THIS_MODULE, - .vmm_entry = vmm_entry, - .tramp_entry = vmm_trampoline, - .vmm_ivt = (unsigned long)&kvm_ia64_ivt, - .patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar, - .patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2, -}; - -static int __init kvm_vmm_init(void) -{ - - vmm_fpswa_interface = fpswa_interface; - - /*Register vmm data to kvm side*/ - return kvm_init(&vmm_info, 1024, 0, THIS_MODULE); -} - -static void __exit kvm_vmm_exit(void) -{ - kvm_exit(); - return ; -} - -void vmm_spin_lock(vmm_spinlock_t *lock) -{ - _vmm_raw_spin_lock(lock); -} - -void vmm_spin_unlock(vmm_spinlock_t *lock) -{ - _vmm_raw_spin_unlock(lock); -} - -static void vcpu_debug_exit(struct kvm_vcpu *vcpu) -{ - struct exit_ctl_data *p = &vcpu->arch.exit_data; - long psr; - - local_irq_save(psr); - p->exit_reason = EXIT_REASON_DEBUG; - vmm_transition(vcpu); - local_irq_restore(psr); -} - -asmlinkage int printk(const char *fmt, ...) 
-{ - struct kvm_vcpu *vcpu = current_vcpu; - va_list args; - int r; - - memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN); - va_start(args, fmt); - r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args); - va_end(args); - vcpu_debug_exit(vcpu); - return r; -} - -module_init(kvm_vmm_init) -module_exit(kvm_vmm_exit) diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S deleted file mode 100644 index 397e34a63e18..000000000000 --- a/arch/ia64/kvm/vmm_ivt.S +++ /dev/null @@ -1,1392 +0,0 @@ -/* - * arch/ia64/kvm/vmm_ivt.S - * - * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co - * Stephane Eranian - * David Mosberger - * Copyright (C) 2000, 2002-2003 Intel Co - * Asit Mallick - * Suresh Siddha - * Kenneth Chen - * Fenghua Yu - * - * - * 00/08/23 Asit Mallick TLB handling - * for SMP - * 00/12/20 David Mosberger-Tang DTLB/ITLB - * handler now uses virtual PT. - * - * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) - * Supporting Intel virtualization architecture - * - */ - -/* - * This file defines the interruption vector table used by the CPU. - * It does not include one entry per possible cause of interruption. - * - * The first 20 entries of the table contain 64 bundles each while the - * remaining 48 entries contain only 16 bundles each. - * - * The 64 bundles are used to allow inlining the whole handler for - * critical - * interruptions like TLB misses. - * - * For each entry, the comment is as follows: - * - * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss - * (12,51) - * entry offset ----/ / / / - * / - * entry number ---------/ / / - * / - * size of the entry -------------/ / - * / - * vector name -------------------------------------/ - * / - * interruptions triggering this vector - * ----------------------/ - * - * The table is 32KB in size and must be aligned on 32KB - * boundary. - * (The CPU ignores the 15 lower bits of the address) - * - * Table is based upon EAS2.6 (Oct 1999) - */ - - -#include -#include -#include - -#include "asm-offsets.h" -#include "vcpu.h" -#include "kvm_minstate.h" -#include "vti.h" - -#if 0 -# define PSR_DEFAULT_BITS psr.ac -#else -# define PSR_DEFAULT_BITS 0 -#endif - -#define KVM_FAULT(n) \ - kvm_fault_##n:; \ - mov r19=n;; \ - br.sptk.many kvm_vmm_panic; \ - ;; \ - -#define KVM_REFLECT(n) \ - mov r31=pr; \ - mov r19=n; /* prepare to save predicates */ \ - mov r29=cr.ipsr; \ - ;; \ - tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ -(p7) br.sptk.many kvm_dispatch_reflection; \ - br.sptk.many kvm_vmm_panic; \ - -GLOBAL_ENTRY(kvm_vmm_panic) - KVM_SAVE_MIN_WITH_COVER_R19 - alloc r14=ar.pfs,0,0,1,0 - mov out0=r15 - adds r3=8,r2 // set up second base pointer - ;; - ssm psr.ic - ;; - srlz.i // guarantee that interruption collection is on - ;; - (p15) ssm psr.i // restore psr. 
- addl r14=@gprel(ia64_leave_hypervisor),gp - ;; - KVM_SAVE_REST - mov rp=r14 - ;; - br.call.sptk.many b6=vmm_panic_handler; -END(kvm_vmm_panic) - - .section .text..ivt,"ax" - - .align 32768 // align on 32KB boundary - .global kvm_ia64_ivt -kvm_ia64_ivt: -/////////////////////////////////////////////////////////////// -// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) -ENTRY(kvm_vhpt_miss) - KVM_FAULT(0) -END(kvm_vhpt_miss) - - .org kvm_ia64_ivt+0x400 -//////////////////////////////////////////////////////////////// -// 0x0400 Entry 1 (size 64 bundles) ITLB (21) -ENTRY(kvm_itlb_miss) - mov r31 = pr - mov r29=cr.ipsr; - ;; - tbit.z p6,p7=r29,IA64_PSR_VM_BIT; -(p6) br.sptk kvm_alt_itlb_miss - mov r19 = 1 - br.sptk kvm_itlb_miss_dispatch - KVM_FAULT(1); -END(kvm_itlb_miss) - - .org kvm_ia64_ivt+0x0800 -////////////////////////////////////////////////////////////////// -// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) -ENTRY(kvm_dtlb_miss) - mov r31 = pr - mov r29=cr.ipsr; - ;; - tbit.z p6,p7=r29,IA64_PSR_VM_BIT; -(p6) br.sptk kvm_alt_dtlb_miss - br.sptk kvm_dtlb_miss_dispatch -END(kvm_dtlb_miss) - - .org kvm_ia64_ivt+0x0c00 -//////////////////////////////////////////////////////////////////// -// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) -ENTRY(kvm_alt_itlb_miss) - mov r16=cr.ifa // get address that caused the TLB miss - ;; - movl r17=PAGE_KERNEL - mov r24=cr.ipsr - movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) - ;; - and r19=r19,r16 // clear ed, reserved bits, and PTE control bits - ;; - or r19=r17,r19 // insert PTE control bits into r19 - ;; - movl r20=IA64_GRANULE_SHIFT<<2 - ;; - mov cr.itir=r20 - ;; - itc.i r19 // insert the TLB entry - mov pr=r31,-1 - rfi -END(kvm_alt_itlb_miss) - - .org kvm_ia64_ivt+0x1000 -///////////////////////////////////////////////////////////////////// -// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) -ENTRY(kvm_alt_dtlb_miss) - mov r16=cr.ifa // get address that caused the TLB miss - ;; - movl r17=PAGE_KERNEL - movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) - mov r24=cr.ipsr - ;; - and r19=r19,r16 // clear ed, reserved bits, and PTE control bits - ;; - or r19=r19,r17 // insert PTE control bits into r19 - ;; - movl r20=IA64_GRANULE_SHIFT<<2 - ;; - mov cr.itir=r20 - ;; - itc.d r19 // insert the TLB entry - mov pr=r31,-1 - rfi -END(kvm_alt_dtlb_miss) - - .org kvm_ia64_ivt+0x1400 -////////////////////////////////////////////////////////////////////// -// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) -ENTRY(kvm_nested_dtlb_miss) - KVM_FAULT(5) -END(kvm_nested_dtlb_miss) - - .org kvm_ia64_ivt+0x1800 -///////////////////////////////////////////////////////////////////// -// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) -ENTRY(kvm_ikey_miss) - KVM_REFLECT(6) -END(kvm_ikey_miss) - - .org kvm_ia64_ivt+0x1c00 -///////////////////////////////////////////////////////////////////// -// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) -ENTRY(kvm_dkey_miss) - KVM_REFLECT(7) -END(kvm_dkey_miss) - - .org kvm_ia64_ivt+0x2000 -//////////////////////////////////////////////////////////////////// -// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) -ENTRY(kvm_dirty_bit) - KVM_REFLECT(8) -END(kvm_dirty_bit) - - .org kvm_ia64_ivt+0x2400 -//////////////////////////////////////////////////////////////////// -// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) -ENTRY(kvm_iaccess_bit) - KVM_REFLECT(9) -END(kvm_iaccess_bit) - - .org kvm_ia64_ivt+0x2800 -/////////////////////////////////////////////////////////////////// 
-// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) -ENTRY(kvm_daccess_bit) - KVM_REFLECT(10) -END(kvm_daccess_bit) - - .org kvm_ia64_ivt+0x2c00 -///////////////////////////////////////////////////////////////// -// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) -ENTRY(kvm_break_fault) - mov r31=pr - mov r19=11 - mov r29=cr.ipsr - ;; - KVM_SAVE_MIN_WITH_COVER_R19 - ;; - alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!) - mov out0=cr.ifa - mov out2=cr.isr // FIXME: pity to make this slow access twice - mov out3=cr.iim // FIXME: pity to make this slow access twice - adds r3=8,r2 // set up second base pointer - ;; - ssm psr.ic - ;; - srlz.i // guarantee that interruption collection is on - ;; - (p15)ssm psr.i // restore psr.i - addl r14=@gprel(ia64_leave_hypervisor),gp - ;; - KVM_SAVE_REST - mov rp=r14 - ;; - adds out1=16,sp - br.call.sptk.many b6=kvm_ia64_handle_break - ;; -END(kvm_break_fault) - - .org kvm_ia64_ivt+0x3000 -///////////////////////////////////////////////////////////////// -// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) -ENTRY(kvm_interrupt) - mov r31=pr // prepare to save predicates - mov r19=12 - mov r29=cr.ipsr - ;; - tbit.z p6,p7=r29,IA64_PSR_VM_BIT - tbit.z p0,p15=r29,IA64_PSR_I_BIT - ;; -(p7) br.sptk kvm_dispatch_interrupt - ;; - mov r27=ar.rsc /* M */ - mov r20=r1 /* A */ - mov r25=ar.unat /* M */ - mov r26=ar.pfs /* I */ - mov r28=cr.iip /* M */ - cover /* B (or nothing) */ - ;; - mov r1=sp - ;; - invala /* M */ - mov r30=cr.ifs - ;; - addl r1=-VMM_PT_REGS_SIZE,r1 - ;; - adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ - adds r16=PT(CR_IPSR),r1 - ;; - lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES - st8 [r16]=r29 /* save cr.ipsr */ - ;; - lfetch.fault.excl.nt1 [r17] - mov r29=b0 - ;; - adds r16=PT(R8),r1 /* initialize first base pointer */ - adds r17=PT(R9),r1 /* initialize second base pointer */ - mov r18=r0 /* make sure r18 isn't NaT */ - ;; -.mem.offset 0,0; st8.spill [r16]=r8,16 -.mem.offset 8,0; st8.spill [r17]=r9,16 - ;; -.mem.offset 0,0; st8.spill [r16]=r10,24 -.mem.offset 8,0; st8.spill [r17]=r11,24 - ;; - st8 [r16]=r28,16 /* save cr.iip */ - st8 [r17]=r30,16 /* save cr.ifs */ - mov r8=ar.fpsr /* M */ - mov r9=ar.csd - mov r10=ar.ssd - movl r11=FPSR_DEFAULT /* L-unit */ - ;; - st8 [r16]=r25,16 /* save ar.unat */ - st8 [r17]=r26,16 /* save ar.pfs */ - shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ - ;; - st8 [r16]=r27,16 /* save ar.rsc */ - adds r17=16,r17 /* skip over ar_rnat field */ - ;; - st8 [r17]=r31,16 /* save predicates */ - adds r16=16,r16 /* skip over ar_bspstore field */ - ;; - st8 [r16]=r29,16 /* save b0 */ - st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ - ;; -.mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ -.mem.offset 8,0; st8.spill [r17]=r12,16 - adds r12=-16,r1 - /* switch to kernel memory stack (with 16 bytes of scratch) */ - ;; -.mem.offset 0,0; st8.spill [r16]=r13,16 -.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ - ;; -.mem.offset 0,0; st8.spill [r16]=r15,16 -.mem.offset 8,0; st8.spill [r17]=r14,16 - dep r14=-1,r0,60,4 - ;; -.mem.offset 0,0; st8.spill [r16]=r2,16 -.mem.offset 8,0; st8.spill [r17]=r3,16 - adds r2=VMM_PT_REGS_R16_OFFSET,r1 - adds r14 = VMM_VCPU_GP_OFFSET,r13 - ;; - mov r8=ar.ccv - ld8 r14 = [r14] - ;; - mov r1=r14 /* establish kernel global pointer */ - ;; \ - bsw.1 - ;; - alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group - mov out0=r13 - ;; - ssm psr.ic - ;; - srlz.i - ;; - //(p15) ssm psr.i - adds r3=8,r2 // set up 
second base pointer for SAVE_REST - srlz.i // ensure everybody knows psr.ic is back on - ;; -.mem.offset 0,0; st8.spill [r2]=r16,16 -.mem.offset 8,0; st8.spill [r3]=r17,16 - ;; -.mem.offset 0,0; st8.spill [r2]=r18,16 -.mem.offset 8,0; st8.spill [r3]=r19,16 - ;; -.mem.offset 0,0; st8.spill [r2]=r20,16 -.mem.offset 8,0; st8.spill [r3]=r21,16 - mov r18=b6 - ;; -.mem.offset 0,0; st8.spill [r2]=r22,16 -.mem.offset 8,0; st8.spill [r3]=r23,16 - mov r19=b7 - ;; -.mem.offset 0,0; st8.spill [r2]=r24,16 -.mem.offset 8,0; st8.spill [r3]=r25,16 - ;; -.mem.offset 0,0; st8.spill [r2]=r26,16 -.mem.offset 8,0; st8.spill [r3]=r27,16 - ;; -.mem.offset 0,0; st8.spill [r2]=r28,16 -.mem.offset 8,0; st8.spill [r3]=r29,16 - ;; -.mem.offset 0,0; st8.spill [r2]=r30,16 -.mem.offset 8,0; st8.spill [r3]=r31,32 - ;; - mov ar.fpsr=r11 /* M-unit */ - st8 [r2]=r8,8 /* ar.ccv */ - adds r24=PT(B6)-PT(F7),r3 - ;; - stf.spill [r2]=f6,32 - stf.spill [r3]=f7,32 - ;; - stf.spill [r2]=f8,32 - stf.spill [r3]=f9,32 - ;; - stf.spill [r2]=f10 - stf.spill [r3]=f11 - adds r25=PT(B7)-PT(F11),r3 - ;; - st8 [r24]=r18,16 /* b6 */ - st8 [r25]=r19,16 /* b7 */ - ;; - st8 [r24]=r9 /* ar.csd */ - st8 [r25]=r10 /* ar.ssd */ - ;; - srlz.d // make sure we see the effect of cr.ivr - addl r14=@gprel(ia64_leave_nested),gp - ;; - mov rp=r14 - br.call.sptk.many b6=kvm_ia64_handle_irq - ;; -END(kvm_interrupt) - - .global kvm_dispatch_vexirq - .org kvm_ia64_ivt+0x3400 -////////////////////////////////////////////////////////////////////// -// 0x3400 Entry 13 (size 64 bundles) Reserved -ENTRY(kvm_virtual_exirq) - mov r31=pr - mov r19=13 - mov r30 =r0 - ;; -kvm_dispatch_vexirq: - cmp.eq p6,p0 = 1,r30 - ;; -(p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 - ;; -(p6) ld8 r1 = [r29] - ;; - KVM_SAVE_MIN_WITH_COVER_R19 - alloc r14=ar.pfs,0,0,1,0 - mov out0=r13 - - ssm psr.ic - ;; - srlz.i // guarantee that interruption collection is on - ;; - (p15) ssm psr.i // restore psr.i - adds r3=8,r2 // set up second base pointer - ;; - KVM_SAVE_REST - addl r14=@gprel(ia64_leave_hypervisor),gp - ;; - mov rp=r14 - br.call.sptk.many b6=kvm_vexirq -END(kvm_virtual_exirq) - - .org kvm_ia64_ivt+0x3800 -///////////////////////////////////////////////////////////////////// -// 0x3800 Entry 14 (size 64 bundles) Reserved - KVM_FAULT(14) - // this code segment is from 2.6.16.13 - - .org kvm_ia64_ivt+0x3c00 -/////////////////////////////////////////////////////////////////////// -// 0x3c00 Entry 15 (size 64 bundles) Reserved - KVM_FAULT(15) - - .org kvm_ia64_ivt+0x4000 -/////////////////////////////////////////////////////////////////////// -// 0x4000 Entry 16 (size 64 bundles) Reserved - KVM_FAULT(16) - - .org kvm_ia64_ivt+0x4400 -////////////////////////////////////////////////////////////////////// -// 0x4400 Entry 17 (size 64 bundles) Reserved - KVM_FAULT(17) - - .org kvm_ia64_ivt+0x4800 -////////////////////////////////////////////////////////////////////// -// 0x4800 Entry 18 (size 64 bundles) Reserved - KVM_FAULT(18) - - .org kvm_ia64_ivt+0x4c00 -////////////////////////////////////////////////////////////////////// -// 0x4c00 Entry 19 (size 64 bundles) Reserved - KVM_FAULT(19) - - .org kvm_ia64_ivt+0x5000 -////////////////////////////////////////////////////////////////////// -// 0x5000 Entry 20 (size 16 bundles) Page Not Present -ENTRY(kvm_page_not_present) - KVM_REFLECT(20) -END(kvm_page_not_present) - - .org kvm_ia64_ivt+0x5100 -/////////////////////////////////////////////////////////////////////// -// 0x5100 Entry 21 (size 16 bundles) Key Permission vector 
-ENTRY(kvm_key_permission) - KVM_REFLECT(21) -END(kvm_key_permission) - - .org kvm_ia64_ivt+0x5200 -////////////////////////////////////////////////////////////////////// -// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) -ENTRY(kvm_iaccess_rights) - KVM_REFLECT(22) -END(kvm_iaccess_rights) - - .org kvm_ia64_ivt+0x5300 -////////////////////////////////////////////////////////////////////// -// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) -ENTRY(kvm_daccess_rights) - KVM_REFLECT(23) -END(kvm_daccess_rights) - - .org kvm_ia64_ivt+0x5400 -///////////////////////////////////////////////////////////////////// -// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) -ENTRY(kvm_general_exception) - KVM_REFLECT(24) - KVM_FAULT(24) -END(kvm_general_exception) - - .org kvm_ia64_ivt+0x5500 -////////////////////////////////////////////////////////////////////// -// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) -ENTRY(kvm_disabled_fp_reg) - KVM_REFLECT(25) -END(kvm_disabled_fp_reg) - - .org kvm_ia64_ivt+0x5600 -//////////////////////////////////////////////////////////////////// -// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) -ENTRY(kvm_nat_consumption) - KVM_REFLECT(26) -END(kvm_nat_consumption) - - .org kvm_ia64_ivt+0x5700 -///////////////////////////////////////////////////////////////////// -// 0x5700 Entry 27 (size 16 bundles) Speculation (40) -ENTRY(kvm_speculation_vector) - KVM_REFLECT(27) -END(kvm_speculation_vector) - - .org kvm_ia64_ivt+0x5800 -///////////////////////////////////////////////////////////////////// -// 0x5800 Entry 28 (size 16 bundles) Reserved - KVM_FAULT(28) - - .org kvm_ia64_ivt+0x5900 -/////////////////////////////////////////////////////////////////// -// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) -ENTRY(kvm_debug_vector) - KVM_FAULT(29) -END(kvm_debug_vector) - - .org kvm_ia64_ivt+0x5a00 -/////////////////////////////////////////////////////////////// -// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) -ENTRY(kvm_unaligned_access) - KVM_REFLECT(30) -END(kvm_unaligned_access) - - .org kvm_ia64_ivt+0x5b00 -////////////////////////////////////////////////////////////////////// -// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) -ENTRY(kvm_unsupported_data_reference) - KVM_REFLECT(31) -END(kvm_unsupported_data_reference) - - .org kvm_ia64_ivt+0x5c00 -//////////////////////////////////////////////////////////////////// -// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) -ENTRY(kvm_floating_point_fault) - KVM_REFLECT(32) -END(kvm_floating_point_fault) - - .org kvm_ia64_ivt+0x5d00 -///////////////////////////////////////////////////////////////////// -// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) -ENTRY(kvm_floating_point_trap) - KVM_REFLECT(33) -END(kvm_floating_point_trap) - - .org kvm_ia64_ivt+0x5e00 -////////////////////////////////////////////////////////////////////// -// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) -ENTRY(kvm_lower_privilege_trap) - KVM_REFLECT(34) -END(kvm_lower_privilege_trap) - - .org kvm_ia64_ivt+0x5f00 -////////////////////////////////////////////////////////////////////// -// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) -ENTRY(kvm_taken_branch_trap) - KVM_REFLECT(35) -END(kvm_taken_branch_trap) - - .org kvm_ia64_ivt+0x6000 -//////////////////////////////////////////////////////////////////// -// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) 
-ENTRY(kvm_single_step_trap) - KVM_REFLECT(36) -END(kvm_single_step_trap) - .global kvm_virtualization_fault_back - .org kvm_ia64_ivt+0x6100 -///////////////////////////////////////////////////////////////////// -// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault -ENTRY(kvm_virtualization_fault) - mov r31=pr - adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 - ;; - st8 [r16] = r1 - adds r17 = VMM_VCPU_GP_OFFSET, r21 - ;; - ld8 r1 = [r17] - cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 - cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 - cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 - cmp.eq p9,p0=EVENT_RSM,r24 - cmp.eq p10,p0=EVENT_SSM,r24 - cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 - cmp.eq p12,p0=EVENT_THASH,r24 -(p6) br.dptk.many kvm_asm_mov_from_ar -(p7) br.dptk.many kvm_asm_mov_from_rr -(p8) br.dptk.many kvm_asm_mov_to_rr -(p9) br.dptk.many kvm_asm_rsm -(p10) br.dptk.many kvm_asm_ssm -(p11) br.dptk.many kvm_asm_mov_to_psr -(p12) br.dptk.many kvm_asm_thash - ;; -kvm_virtualization_fault_back: - adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 - ;; - ld8 r1 = [r16] - ;; - mov r19=37 - adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 - adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 - ;; - st8 [r16] = r24 - st8 [r17] = r25 - ;; - cmp.ne p6,p0=EVENT_RFI, r24 -(p6) br.sptk kvm_dispatch_virtualization_fault - ;; - adds r18=VMM_VPD_BASE_OFFSET,r21 - ;; - ld8 r18=[r18] - ;; - adds r18=VMM_VPD_VIFS_OFFSET,r18 - ;; - ld8 r18=[r18] - ;; - tbit.z p6,p0=r18,63 -(p6) br.sptk kvm_dispatch_virtualization_fault - ;; -//if vifs.v=1 desert current register frame - alloc r18=ar.pfs,0,0,0,0 - br.sptk kvm_dispatch_virtualization_fault -END(kvm_virtualization_fault) - - .org kvm_ia64_ivt+0x6200 -////////////////////////////////////////////////////////////// -// 0x6200 Entry 38 (size 16 bundles) Reserved - KVM_FAULT(38) - - .org kvm_ia64_ivt+0x6300 -///////////////////////////////////////////////////////////////// -// 0x6300 Entry 39 (size 16 bundles) Reserved - KVM_FAULT(39) - - .org kvm_ia64_ivt+0x6400 -///////////////////////////////////////////////////////////////// -// 0x6400 Entry 40 (size 16 bundles) Reserved - KVM_FAULT(40) - - .org kvm_ia64_ivt+0x6500 -////////////////////////////////////////////////////////////////// -// 0x6500 Entry 41 (size 16 bundles) Reserved - KVM_FAULT(41) - - .org kvm_ia64_ivt+0x6600 -////////////////////////////////////////////////////////////////// -// 0x6600 Entry 42 (size 16 bundles) Reserved - KVM_FAULT(42) - - .org kvm_ia64_ivt+0x6700 -////////////////////////////////////////////////////////////////// -// 0x6700 Entry 43 (size 16 bundles) Reserved - KVM_FAULT(43) - - .org kvm_ia64_ivt+0x6800 -////////////////////////////////////////////////////////////////// -// 0x6800 Entry 44 (size 16 bundles) Reserved - KVM_FAULT(44) - - .org kvm_ia64_ivt+0x6900 -/////////////////////////////////////////////////////////////////// -// 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception -//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) -ENTRY(kvm_ia32_exception) - KVM_FAULT(45) -END(kvm_ia32_exception) - - .org kvm_ia64_ivt+0x6a00 -//////////////////////////////////////////////////////////////////// -// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) -ENTRY(kvm_ia32_intercept) - KVM_FAULT(47) -END(kvm_ia32_intercept) - - .org kvm_ia64_ivt+0x6c00 -///////////////////////////////////////////////////////////////////// -// 0x6c00 Entry 48 (size 16 bundles) Reserved - KVM_FAULT(48) - - .org kvm_ia64_ivt+0x6d00 -////////////////////////////////////////////////////////////////////// -// 0x6d00 Entry 49 (size 16 bundles) Reserved - KVM_FAULT(49) - 
- .org kvm_ia64_ivt+0x6e00 -////////////////////////////////////////////////////////////////////// -// 0x6e00 Entry 50 (size 16 bundles) Reserved - KVM_FAULT(50) - - .org kvm_ia64_ivt+0x6f00 -///////////////////////////////////////////////////////////////////// -// 0x6f00 Entry 51 (size 16 bundles) Reserved - KVM_FAULT(52) - - .org kvm_ia64_ivt+0x7100 -//////////////////////////////////////////////////////////////////// -// 0x7100 Entry 53 (size 16 bundles) Reserved - KVM_FAULT(53) - - .org kvm_ia64_ivt+0x7200 -///////////////////////////////////////////////////////////////////// -// 0x7200 Entry 54 (size 16 bundles) Reserved - KVM_FAULT(54) - - .org kvm_ia64_ivt+0x7300 -//////////////////////////////////////////////////////////////////// -// 0x7300 Entry 55 (size 16 bundles) Reserved - KVM_FAULT(55) - - .org kvm_ia64_ivt+0x7400 -//////////////////////////////////////////////////////////////////// -// 0x7400 Entry 56 (size 16 bundles) Reserved - KVM_FAULT(56) - - .org kvm_ia64_ivt+0x7500 -///////////////////////////////////////////////////////////////////// -// 0x7500 Entry 57 (size 16 bundles) Reserved - KVM_FAULT(57) - - .org kvm_ia64_ivt+0x7600 -///////////////////////////////////////////////////////////////////// -// 0x7600 Entry 58 (size 16 bundles) Reserved - KVM_FAULT(58) - - .org kvm_ia64_ivt+0x7700 -//////////////////////////////////////////////////////////////////// -// 0x7700 Entry 59 (size 16 bundles) Reserved - KVM_FAULT(59) - - .org kvm_ia64_ivt+0x7800 -//////////////////////////////////////////////////////////////////// -// 0x7800 Entry 60 (size 16 bundles) Reserved - KVM_FAULT(60) - - .org kvm_ia64_ivt+0x7900 -///////////////////////////////////////////////////////////////////// -// 0x7900 Entry 61 (size 16 bundles) Reserved - KVM_FAULT(61) - - .org kvm_ia64_ivt+0x7a00 -///////////////////////////////////////////////////////////////////// -// 0x7a00 Entry 62 (size 16 bundles) Reserved - KVM_FAULT(62) - - .org kvm_ia64_ivt+0x7b00 -///////////////////////////////////////////////////////////////////// -// 0x7b00 Entry 63 (size 16 bundles) Reserved - KVM_FAULT(63) - - .org kvm_ia64_ivt+0x7c00 -//////////////////////////////////////////////////////////////////// -// 0x7c00 Entry 64 (size 16 bundles) Reserved - KVM_FAULT(64) - - .org kvm_ia64_ivt+0x7d00 -///////////////////////////////////////////////////////////////////// -// 0x7d00 Entry 65 (size 16 bundles) Reserved - KVM_FAULT(65) - - .org kvm_ia64_ivt+0x7e00 -///////////////////////////////////////////////////////////////////// -// 0x7e00 Entry 66 (size 16 bundles) Reserved - KVM_FAULT(66) - - .org kvm_ia64_ivt+0x7f00 -//////////////////////////////////////////////////////////////////// -// 0x7f00 Entry 67 (size 16 bundles) Reserved - KVM_FAULT(67) - - .org kvm_ia64_ivt+0x8000 -// There is no particular reason for this code to be here, other than that -// there happens to be space here that would go unused otherwise. If this -// fault ever gets "unreserved", simply moved the following code to a more -// suitable spot... 
- - -ENTRY(kvm_dtlb_miss_dispatch) - mov r19 = 2 - KVM_SAVE_MIN_WITH_COVER_R19 - alloc r14=ar.pfs,0,0,3,0 - mov out0=cr.ifa - mov out1=r15 - adds r3=8,r2 // set up second base pointer - ;; - ssm psr.ic - ;; - srlz.i // guarantee that interruption collection is on - ;; - (p15) ssm psr.i // restore psr.i - addl r14=@gprel(ia64_leave_hypervisor_prepare),gp - ;; - KVM_SAVE_REST - KVM_SAVE_EXTRA - mov rp=r14 - ;; - adds out2=16,r12 - br.call.sptk.many b6=kvm_page_fault -END(kvm_dtlb_miss_dispatch) - -ENTRY(kvm_itlb_miss_dispatch) - - KVM_SAVE_MIN_WITH_COVER_R19 - alloc r14=ar.pfs,0,0,3,0 - mov out0=cr.ifa - mov out1=r15 - adds r3=8,r2 // set up second base pointer - ;; - ssm psr.ic - ;; - srlz.i // guarantee that interruption collection is on - ;; - (p15) ssm psr.i // restore psr.i - addl r14=@gprel(ia64_leave_hypervisor),gp - ;; - KVM_SAVE_REST - mov rp=r14 - ;; - adds out2=16,r12 - br.call.sptk.many b6=kvm_page_fault -END(kvm_itlb_miss_dispatch) - -ENTRY(kvm_dispatch_reflection) -/* - * Input: - * psr.ic: off - * r19: intr type (offset into ivt, see ia64_int.h) - * r31: contains saved predicates (pr) - */ - KVM_SAVE_MIN_WITH_COVER_R19 - alloc r14=ar.pfs,0,0,5,0 - mov out0=cr.ifa - mov out1=cr.isr - mov out2=cr.iim - mov out3=r15 - adds r3=8,r2 // set up second base pointer - ;; - ssm psr.ic - ;; - srlz.i // guarantee that interruption collection is on - ;; - (p15) ssm psr.i // restore psr.i - addl r14=@gprel(ia64_leave_hypervisor),gp - ;; - KVM_SAVE_REST - mov rp=r14 - ;; - adds out4=16,r12 - br.call.sptk.many b6=reflect_interruption -END(kvm_dispatch_reflection) - -ENTRY(kvm_dispatch_virtualization_fault) - adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 - adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 - ;; - st8 [r16] = r24 - st8 [r17] = r25 - ;; - KVM_SAVE_MIN_WITH_COVER_R19 - ;; - alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!) 
- mov out0=r13 //vcpu - adds r3=8,r2 // set up second base pointer - ;; - ssm psr.ic - ;; - srlz.i // guarantee that interruption collection is on - ;; - (p15) ssm psr.i // restore psr.i - addl r14=@gprel(ia64_leave_hypervisor_prepare),gp - ;; - KVM_SAVE_REST - KVM_SAVE_EXTRA - mov rp=r14 - ;; - adds out1=16,sp //regs - br.call.sptk.many b6=kvm_emulate -END(kvm_dispatch_virtualization_fault) - - -ENTRY(kvm_dispatch_interrupt) - KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 - ;; - alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group - adds r3=8,r2 // set up second base pointer for SAVE_REST - ;; - ssm psr.ic - ;; - srlz.i - ;; - (p15) ssm psr.i - addl r14=@gprel(ia64_leave_hypervisor),gp - ;; - KVM_SAVE_REST - mov rp=r14 - ;; - mov out0=r13 // pass pointer to pt_regs as second arg - br.call.sptk.many b6=kvm_ia64_handle_irq -END(kvm_dispatch_interrupt) - -GLOBAL_ENTRY(ia64_leave_nested) - rsm psr.i - ;; - adds r21=PT(PR)+16,r12 - ;; - lfetch [r21],PT(CR_IPSR)-PT(PR) - adds r2=PT(B6)+16,r12 - adds r3=PT(R16)+16,r12 - ;; - lfetch [r21] - ld8 r28=[r2],8 // load b6 - adds r29=PT(R24)+16,r12 - - ld8.fill r16=[r3] - adds r3=PT(AR_CSD)-PT(R16),r3 - adds r30=PT(AR_CCV)+16,r12 - ;; - ld8.fill r24=[r29] - ld8 r15=[r30] // load ar.ccv - ;; - ld8 r29=[r2],16 // load b7 - ld8 r30=[r3],16 // load ar.csd - ;; - ld8 r31=[r2],16 // load ar.ssd - ld8.fill r8=[r3],16 - ;; - ld8.fill r9=[r2],16 - ld8.fill r10=[r3],PT(R17)-PT(R10) - ;; - ld8.fill r11=[r2],PT(R18)-PT(R11) - ld8.fill r17=[r3],16 - ;; - ld8.fill r18=[r2],16 - ld8.fill r19=[r3],16 - ;; - ld8.fill r20=[r2],16 - ld8.fill r21=[r3],16 - mov ar.csd=r30 - mov ar.ssd=r31 - ;; - rsm psr.i | psr.ic - // initiate turning off of interrupt and interruption collection - invala // invalidate ALAT - ;; - srlz.i - ;; - ld8.fill r22=[r2],24 - ld8.fill r23=[r3],24 - mov b6=r28 - ;; - ld8.fill r25=[r2],16 - ld8.fill r26=[r3],16 - mov b7=r29 - ;; - ld8.fill r27=[r2],16 - ld8.fill r28=[r3],16 - ;; - ld8.fill r29=[r2],16 - ld8.fill r30=[r3],24 - ;; - ld8.fill r31=[r2],PT(F9)-PT(R31) - adds r3=PT(F10)-PT(F6),r3 - ;; - ldf.fill f9=[r2],PT(F6)-PT(F9) - ldf.fill f10=[r3],PT(F8)-PT(F10) - ;; - ldf.fill f6=[r2],PT(F7)-PT(F6) - ;; - ldf.fill f7=[r2],PT(F11)-PT(F7) - ldf.fill f8=[r3],32 - ;; - srlz.i // ensure interruption collection is off - mov ar.ccv=r15 - ;; - bsw.0 // switch back to bank 0 (no stop bit required beforehand...) 
- ;; - ldf.fill f11=[r2] -// mov r18=r13 -// mov r21=r13 - adds r16=PT(CR_IPSR)+16,r12 - adds r17=PT(CR_IIP)+16,r12 - ;; - ld8 r29=[r16],16 // load cr.ipsr - ld8 r28=[r17],16 // load cr.iip - ;; - ld8 r30=[r16],16 // load cr.ifs - ld8 r25=[r17],16 // load ar.unat - ;; - ld8 r26=[r16],16 // load ar.pfs - ld8 r27=[r17],16 // load ar.rsc - cmp.eq p9,p0=r0,r0 - // set p9 to indicate that we should restore cr.ifs - ;; - ld8 r24=[r16],16 // load ar.rnat (may be garbage) - ld8 r23=[r17],16// load ar.bspstore (may be garbage) - ;; - ld8 r31=[r16],16 // load predicates - ld8 r22=[r17],16 // load b0 - ;; - ld8 r19=[r16],16 // load ar.rsc value for "loadrs" - ld8.fill r1=[r17],16 // load r1 - ;; - ld8.fill r12=[r16],16 - ld8.fill r13=[r17],16 - ;; - ld8 r20=[r16],16 // ar.fpsr - ld8.fill r15=[r17],16 - ;; - ld8.fill r14=[r16],16 - ld8.fill r2=[r17] - ;; - ld8.fill r3=[r16] - ;; - mov r16=ar.bsp // get existing backing store pointer - ;; - mov b0=r22 - mov ar.pfs=r26 - mov cr.ifs=r30 - mov cr.ipsr=r29 - mov ar.fpsr=r20 - mov cr.iip=r28 - ;; - mov ar.rsc=r27 - mov ar.unat=r25 - mov pr=r31,-1 - rfi -END(ia64_leave_nested) - -GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) -/* - * work.need_resched etc. mustn't get changed - *by this CPU before it returns to - * user- or fsys-mode, hence we disable interrupts early on: - */ - adds r2 = PT(R4)+16,r12 - adds r3 = PT(R5)+16,r12 - adds r8 = PT(EML_UNAT)+16,r12 - ;; - ld8 r8 = [r8] - ;; - mov ar.unat=r8 - ;; - ld8.fill r4=[r2],16 //load r4 - ld8.fill r5=[r3],16 //load r5 - ;; - ld8.fill r6=[r2] //load r6 - ld8.fill r7=[r3] //load r7 - ;; -END(ia64_leave_hypervisor_prepare) -//fall through -GLOBAL_ENTRY(ia64_leave_hypervisor) - rsm psr.i - ;; - br.call.sptk.many b0=leave_hypervisor_tail - ;; - adds r20=PT(PR)+16,r12 - adds r8=PT(EML_UNAT)+16,r12 - ;; - ld8 r8=[r8] - ;; - mov ar.unat=r8 - ;; - lfetch [r20],PT(CR_IPSR)-PT(PR) - adds r2 = PT(B6)+16,r12 - adds r3 = PT(B7)+16,r12 - ;; - lfetch [r20] - ;; - ld8 r24=[r2],16 /* B6 */ - ld8 r25=[r3],16 /* B7 */ - ;; - ld8 r26=[r2],16 /* ar_csd */ - ld8 r27=[r3],16 /* ar_ssd */ - mov b6 = r24 - ;; - ld8.fill r8=[r2],16 - ld8.fill r9=[r3],16 - mov b7 = r25 - ;; - mov ar.csd = r26 - mov ar.ssd = r27 - ;; - ld8.fill r10=[r2],PT(R15)-PT(R10) - ld8.fill r11=[r3],PT(R14)-PT(R11) - ;; - ld8.fill r15=[r2],PT(R16)-PT(R15) - ld8.fill r14=[r3],PT(R17)-PT(R14) - ;; - ld8.fill r16=[r2],16 - ld8.fill r17=[r3],16 - ;; - ld8.fill r18=[r2],16 - ld8.fill r19=[r3],16 - ;; - ld8.fill r20=[r2],16 - ld8.fill r21=[r3],16 - ;; - ld8.fill r22=[r2],16 - ld8.fill r23=[r3],16 - ;; - ld8.fill r24=[r2],16 - ld8.fill r25=[r3],16 - ;; - ld8.fill r26=[r2],16 - ld8.fill r27=[r3],16 - ;; - ld8.fill r28=[r2],16 - ld8.fill r29=[r3],16 - ;; - ld8.fill r30=[r2],PT(F6)-PT(R30) - ld8.fill r31=[r3],PT(F7)-PT(R31) - ;; - rsm psr.i | psr.ic - // initiate turning off of interrupt and interruption collection - invala // invalidate ALAT - ;; - srlz.i // ensure interruption collection is off - ;; - bsw.0 - ;; - adds r16 = PT(CR_IPSR)+16,r12 - adds r17 = PT(CR_IIP)+16,r12 - mov r21=r13 // get current - ;; - ld8 r31=[r16],16 // load cr.ipsr - ld8 r30=[r17],16 // load cr.iip - ;; - ld8 r29=[r16],16 // load cr.ifs - ld8 r28=[r17],16 // load ar.unat - ;; - ld8 r27=[r16],16 // load ar.pfs - ld8 r26=[r17],16 // load ar.rsc - ;; - ld8 r25=[r16],16 // load ar.rnat - ld8 r24=[r17],16 // load ar.bspstore - ;; - ld8 r23=[r16],16 // load predicates - ld8 r22=[r17],16 // load b0 - ;; - ld8 r20=[r16],16 // load ar.rsc value for "loadrs" - ld8.fill r1=[r17],16 //load r1 - ;; - ld8.fill 
r12=[r16],16 //load r12 - ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 - ;; - ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr - ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 - ;; - ld8.fill r3=[r16] //load r3 - ld8 r18=[r17] //load ar_ccv - ;; - mov ar.fpsr=r19 - mov ar.ccv=r18 - shr.u r18=r20,16 - ;; -kvm_rbs_switch: - mov r19=96 - -kvm_dont_preserve_current_frame: -/* - * To prevent leaking bits between the hypervisor and guest domain, - * we must clear the stacked registers in the "invalid" partition here. - * 5 registers/cycle on McKinley). - */ -# define pRecurse p6 -# define pReturn p7 -# define Nregs 14 - - alloc loc0=ar.pfs,2,Nregs-2,2,0 - shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) - sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize - ;; - mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" - shladd in0=loc1,3,r19 - mov in1=0 - ;; - TEXT_ALIGN(32) -kvm_rse_clear_invalid: - alloc loc0=ar.pfs,2,Nregs-2,2,0 - cmp.lt pRecurse,p0=Nregs*8,in0 - // if more than Nregs regs left to clear, (re)curse - add out0=-Nregs*8,in0 - add out1=1,in1 // increment recursion count - mov loc1=0 - mov loc2=0 - ;; - mov loc3=0 - mov loc4=0 - mov loc5=0 - mov loc6=0 - mov loc7=0 -(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid - ;; - mov loc8=0 - mov loc9=0 - cmp.ne pReturn,p0=r0,in1 - // if recursion count != 0, we need to do a br.ret - mov loc10=0 - mov loc11=0 -(pReturn) br.ret.dptk.many b0 - -# undef pRecurse -# undef pReturn - -// loadrs has already been shifted - alloc r16=ar.pfs,0,0,0,0 // drop current register frame - ;; - loadrs - ;; - mov ar.bspstore=r24 - ;; - mov ar.unat=r28 - mov ar.rnat=r25 - mov ar.rsc=r26 - ;; - mov cr.ipsr=r31 - mov cr.iip=r30 - mov cr.ifs=r29 - mov ar.pfs=r27 - adds r18=VMM_VPD_BASE_OFFSET,r21 - ;; - ld8 r18=[r18] //vpd - adds r17=VMM_VCPU_ISR_OFFSET,r21 - ;; - ld8 r17=[r17] - adds r19=VMM_VPD_VPSR_OFFSET,r18 - ;; - ld8 r19=[r19] //vpsr - mov r25=r18 - adds r16= VMM_VCPU_GP_OFFSET,r21 - ;; - ld8 r16= [r16] // Put gp in r24 - movl r24=@gprel(ia64_vmm_entry) // calculate return address - ;; - add r24=r24,r16 - ;; - br.sptk.many kvm_vps_sync_write // call the service - ;; -END(ia64_leave_hypervisor) -// fall through -GLOBAL_ENTRY(ia64_vmm_entry) -/* - * must be at bank 0 - * parameter: - * r17:cr.isr - * r18:vpd - * r19:vpsr - * r22:b0 - * r23:predicate - */ - mov r24=r22 - mov r25=r18 - tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic -(p1) br.cond.sptk.few kvm_vps_resume_normal -(p2) br.cond.sptk.many kvm_vps_resume_handler - ;; -END(ia64_vmm_entry) - -/* - * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, - * u64 arg3, u64 arg4, u64 arg5, - * u64 arg6, u64 arg7); - * - * XXX: The currently defined services use only 4 args at the max. The - * rest are not consumed. 
- */ -GLOBAL_ENTRY(ia64_call_vsa) - .regstk 4,4,0,0 - -rpsave = loc0 -pfssave = loc1 -psrsave = loc2 -entry = loc3 -hostret = r24 - - alloc pfssave=ar.pfs,4,4,0,0 - mov rpsave=rp - adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 - ;; - ld8 entry=[entry] -1: mov hostret=ip - mov r25=in1 // copy arguments - mov r26=in2 - mov r27=in3 - mov psrsave=psr - ;; - tbit.nz p6,p0=psrsave,14 // IA64_PSR_I - tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC - ;; - add hostret=2f-1b,hostret // calculate return address - add entry=entry,in0 - ;; - rsm psr.i | psr.ic - ;; - srlz.i - mov b6=entry - br.cond.sptk b6 // call the service -2: -// Architectural sequence for enabling interrupts if necessary -(p7) ssm psr.ic - ;; -(p7) srlz.i - ;; -(p6) ssm psr.i - ;; - mov rp=rpsave - mov ar.pfs=pfssave - mov r8=r31 - ;; - srlz.d - br.ret.sptk rp - -END(ia64_call_vsa) - -#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) - -GLOBAL_ENTRY(vmm_reset_entry) - //set up ipsr, iip, vpd.vpsr, dcr - // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 - // For DCR: all bits 0 - bsw.0 - ;; - mov r21 =r13 - adds r14=-VMM_PT_REGS_SIZE, r12 - ;; - movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 - movl r10=0x8000000000000000 - adds r16=PT(CR_IIP), r14 - adds r20=PT(R1), r14 - ;; - rsm psr.ic | psr.i - ;; - srlz.i - ;; - mov ar.rsc = 0 - ;; - flushrs - ;; - mov ar.bspstore = 0 - // clear BSPSTORE - ;; - mov cr.ipsr=r6 - mov cr.ifs=r10 - ld8 r4 = [r16] // Set init iip for first run. - ld8 r1 = [r20] - ;; - mov cr.iip=r4 - adds r16=VMM_VPD_BASE_OFFSET,r13 - ;; - ld8 r18=[r16] - ;; - adds r19=VMM_VPD_VPSR_OFFSET,r18 - ;; - ld8 r19=[r19] - mov r17=r0 - mov r22=r0 - mov r23=r0 - br.cond.sptk ia64_vmm_entry - br.ret.sptk b0 -END(vmm_reset_entry) diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h deleted file mode 100644 index b214b5b0432d..000000000000 --- a/arch/ia64/kvm/vti.h +++ /dev/null @@ -1,290 +0,0 @@ -/* - * vti.h: prototype for generial vt related interface - * Copyright (c) 2004, Intel Corporation. - * - * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com) - * Fred Yang (fred.yang@intel.com) - * Kun Tian (Kevin Tian) (kevin.tian@intel.com) - * - * Copyright (c) 2007, Intel Corporation. - * Zhang xiantao - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
- */ -#ifndef _KVM_VT_I_H -#define _KVM_VT_I_H - -#ifndef __ASSEMBLY__ -#include - -#include - -/* define itr.i and itr.d in ia64_itr function */ -#define ITR 0x01 -#define DTR 0x02 -#define IaDTR 0x03 - -#define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/ -#define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/ - -#define RR6 (6UL<<61) -#define RR7 (7UL<<61) - - -/* config_options in pal_vp_init_env */ -#define VP_INITIALIZE 1UL -#define VP_FR_PMC 1UL<<1 -#define VP_OPCODE 1UL<<8 -#define VP_CAUSE 1UL<<9 -#define VP_FW_ACC 1UL<<63 - -/* init vp env with initializing vm_buffer */ -#define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\ - VP_OPCODE | VP_CAUSE | VP_FW_ACC) -/* init vp env without initializing vm_buffer */ -#define VP_INIT_ENV VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC - -#define PAL_VP_CREATE 265 -/* Stacked Virt. Initializes a new VPD for the operation of - * a new virtual processor in the virtual environment. - */ -#define PAL_VP_ENV_INFO 266 -/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/ -#define PAL_VP_EXIT_ENV 267 -/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/ -#define PAL_VP_INIT_ENV 268 -/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/ -#define PAL_VP_REGISTER 269 -/*Stacked Virt. Register a different host IVT for the virtual processor.*/ -#define PAL_VP_RESUME 270 -/* Renamed from PAL_VP_RESUME */ -#define PAL_VP_RESTORE 270 -/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/ -#define PAL_VP_SUSPEND 271 -/* Renamed from PAL_VP_SUSPEND */ -#define PAL_VP_SAVE 271 -/* Stacked Virt. Suspends operation for the specified virtual processor on - * the logical processor. - */ -#define PAL_VP_TERMINATE 272 -/* Stacked Virt. 
Terminates operation for the specified virtual processor.*/ - -union vac { - unsigned long value; - struct { - unsigned int a_int:1; - unsigned int a_from_int_cr:1; - unsigned int a_to_int_cr:1; - unsigned int a_from_psr:1; - unsigned int a_from_cpuid:1; - unsigned int a_cover:1; - unsigned int a_bsw:1; - long reserved:57; - }; -}; - -union vdc { - unsigned long value; - struct { - unsigned int d_vmsw:1; - unsigned int d_extint:1; - unsigned int d_ibr_dbr:1; - unsigned int d_pmc:1; - unsigned int d_to_pmd:1; - unsigned int d_itm:1; - long reserved:58; - }; -}; - -struct vpd { - union vac vac; - union vdc vdc; - unsigned long virt_env_vaddr; - unsigned long reserved1[29]; - unsigned long vhpi; - unsigned long reserved2[95]; - unsigned long vgr[16]; - unsigned long vbgr[16]; - unsigned long vnat; - unsigned long vbnat; - unsigned long vcpuid[5]; - unsigned long reserved3[11]; - unsigned long vpsr; - unsigned long vpr; - unsigned long reserved4[76]; - union { - unsigned long vcr[128]; - struct { - unsigned long dcr; - unsigned long itm; - unsigned long iva; - unsigned long rsv1[5]; - unsigned long pta; - unsigned long rsv2[7]; - unsigned long ipsr; - unsigned long isr; - unsigned long rsv3; - unsigned long iip; - unsigned long ifa; - unsigned long itir; - unsigned long iipa; - unsigned long ifs; - unsigned long iim; - unsigned long iha; - unsigned long rsv4[38]; - unsigned long lid; - unsigned long ivr; - unsigned long tpr; - unsigned long eoi; - unsigned long irr[4]; - unsigned long itv; - unsigned long pmv; - unsigned long cmcv; - unsigned long rsv5[5]; - unsigned long lrr0; - unsigned long lrr1; - unsigned long rsv6[46]; - }; - }; - unsigned long reserved5[128]; - unsigned long reserved6[3456]; - unsigned long vmm_avail[128]; - unsigned long reserved7[4096]; -}; - -#define PAL_PROC_VM_BIT (1UL << 40) -#define PAL_PROC_VMSW_BIT (1UL << 54) - -static inline s64 ia64_pal_vp_env_info(u64 *buffer_size, - u64 *vp_env_info) -{ - struct ia64_pal_retval iprv; - PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0); - *buffer_size = iprv.v0; - *vp_env_info = iprv.v1; - return iprv.status; -} - -static inline s64 ia64_pal_vp_exit_env(u64 iva) -{ - struct ia64_pal_retval iprv; - - PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0); - return iprv.status; -} - -static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr, - u64 vbase_addr, u64 *vsa_base) -{ - struct ia64_pal_retval iprv; - - PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr, - vbase_addr); - *vsa_base = iprv.v0; - - return iprv.status; -} - -static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector) -{ - struct ia64_pal_retval iprv; - - PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0); - - return iprv.status; -} - -static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector) -{ - struct ia64_pal_retval iprv; - - PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0); - - return iprv.status; -} - -#endif - -/*VPD field offset*/ -#define VPD_VAC_START_OFFSET 0 -#define VPD_VDC_START_OFFSET 8 -#define VPD_VHPI_START_OFFSET 256 -#define VPD_VGR_START_OFFSET 1024 -#define VPD_VBGR_START_OFFSET 1152 -#define VPD_VNAT_START_OFFSET 1280 -#define VPD_VBNAT_START_OFFSET 1288 -#define VPD_VCPUID_START_OFFSET 1296 -#define VPD_VPSR_START_OFFSET 1424 -#define VPD_VPR_START_OFFSET 1432 -#define VPD_VRSE_CFLE_START_OFFSET 1440 -#define VPD_VCR_START_OFFSET 2048 -#define VPD_VTPR_START_OFFSET 2576 -#define VPD_VRR_START_OFFSET 3072 -#define VPD_VMM_VAIL_START_OFFSET 31744 - -/*Virtualization 
faults*/ - -#define EVENT_MOV_TO_AR 1 -#define EVENT_MOV_TO_AR_IMM 2 -#define EVENT_MOV_FROM_AR 3 -#define EVENT_MOV_TO_CR 4 -#define EVENT_MOV_FROM_CR 5 -#define EVENT_MOV_TO_PSR 6 -#define EVENT_MOV_FROM_PSR 7 -#define EVENT_ITC_D 8 -#define EVENT_ITC_I 9 -#define EVENT_MOV_TO_RR 10 -#define EVENT_MOV_TO_DBR 11 -#define EVENT_MOV_TO_IBR 12 -#define EVENT_MOV_TO_PKR 13 -#define EVENT_MOV_TO_PMC 14 -#define EVENT_MOV_TO_PMD 15 -#define EVENT_ITR_D 16 -#define EVENT_ITR_I 17 -#define EVENT_MOV_FROM_RR 18 -#define EVENT_MOV_FROM_DBR 19 -#define EVENT_MOV_FROM_IBR 20 -#define EVENT_MOV_FROM_PKR 21 -#define EVENT_MOV_FROM_PMC 22 -#define EVENT_MOV_FROM_CPUID 23 -#define EVENT_SSM 24 -#define EVENT_RSM 25 -#define EVENT_PTC_L 26 -#define EVENT_PTC_G 27 -#define EVENT_PTC_GA 28 -#define EVENT_PTR_D 29 -#define EVENT_PTR_I 30 -#define EVENT_THASH 31 -#define EVENT_TTAG 32 -#define EVENT_TPA 33 -#define EVENT_TAK 34 -#define EVENT_PTC_E 35 -#define EVENT_COVER 36 -#define EVENT_RFI 37 -#define EVENT_BSW_0 38 -#define EVENT_BSW_1 39 -#define EVENT_VMSW 40 - -/**PAL virtual services offsets */ -#define PAL_VPS_RESUME_NORMAL 0x0000 -#define PAL_VPS_RESUME_HANDLER 0x0400 -#define PAL_VPS_SYNC_READ 0x0800 -#define PAL_VPS_SYNC_WRITE 0x0c00 -#define PAL_VPS_SET_PENDING_INTERRUPT 0x1000 -#define PAL_VPS_THASH 0x1400 -#define PAL_VPS_TTAG 0x1800 -#define PAL_VPS_RESTORE 0x1c00 -#define PAL_VPS_SAVE 0x2000 - -#endif/* _VT_I_H*/ diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c deleted file mode 100644 index a7869f8f49a6..000000000000 --- a/arch/ia64/kvm/vtlb.c +++ /dev/null @@ -1,640 +0,0 @@ -/* - * vtlb.c: guest virtual tlb handling module. - * Copyright (c) 2004, Intel Corporation. - * Yaozu Dong (Eddie Dong) - * Xuefei Xu (Anthony Xu) - * - * Copyright (c) 2007, Intel Corporation. - * Xuefei Xu (Anthony Xu) - * Xiantao Zhang - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#include "vcpu.h" - -#include - -#include - -/* - * Check to see if the address rid:va is translated by the TLB - */ - -static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va) -{ - return ((trp->p) && (trp->rid == rid) - && ((va-trp->vadr) < PSIZE(trp->ps))); -} - -/* - * Only for GUEST TR format. 
- */ -static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva) -{ - u64 sa1, ea1; - - if (!trp->p || trp->rid != rid) - return 0; - - sa1 = trp->vadr; - ea1 = sa1 + PSIZE(trp->ps) - 1; - eva -= 1; - if ((sva > ea1) || (sa1 > eva)) - return 0; - else - return 1; - -} - -void machine_tlb_purge(u64 va, u64 ps) -{ - ia64_ptcl(va, ps << 2); -} - -void local_flush_tlb_all(void) -{ - int i, j; - unsigned long flags, count0, count1; - unsigned long stride0, stride1, addr; - - addr = current_vcpu->arch.ptce_base; - count0 = current_vcpu->arch.ptce_count[0]; - count1 = current_vcpu->arch.ptce_count[1]; - stride0 = current_vcpu->arch.ptce_stride[0]; - stride1 = current_vcpu->arch.ptce_stride[1]; - - local_irq_save(flags); - for (i = 0; i < count0; ++i) { - for (j = 0; j < count1; ++j) { - ia64_ptce(addr); - addr += stride1; - } - addr += stride0; - } - local_irq_restore(flags); - ia64_srlz_i(); /* srlz.i implies srlz.d */ -} - -int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref) -{ - union ia64_rr vrr; - union ia64_pta vpta; - struct ia64_psr vpsr; - - vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); - vrr.val = vcpu_get_rr(vcpu, vadr); - vpta.val = vcpu_get_pta(vcpu); - - if (vrr.ve & vpta.ve) { - switch (ref) { - case DATA_REF: - case NA_REF: - return vpsr.dt; - case INST_REF: - return vpsr.dt && vpsr.it && vpsr.ic; - case RSE_REF: - return vpsr.dt && vpsr.rt; - - } - } - return 0; -} - -struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag) -{ - u64 index, pfn, rid, pfn_bits; - - pfn_bits = vpta.size - 5 - 8; - pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr); - rid = _REGION_ID(vrr); - index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1)); - *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16); - - return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) + - (index << 5)); -} - -struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type) -{ - - struct thash_data *trp; - int i; - u64 rid; - - rid = vcpu_get_rr(vcpu, va); - rid = rid & RR_RID_MASK; - if (type == D_TLB) { - if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { - for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; - i < NDTRS; i++, trp++) { - if (__is_tr_translated(trp, rid, va)) - return trp; - } - } - } else { - if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { - for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; - i < NITRS; i++, trp++) { - if (__is_tr_translated(trp, rid, va)) - return trp; - } - } - } - - return NULL; -} - -static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte) -{ - union ia64_rr rr; - struct thash_data *head; - unsigned long ps, gpaddr; - - ps = itir_ps(itir); - rr.val = ia64_get_rr(ifa); - - gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | - (ifa & ((1UL << ps) - 1)); - - head = (struct thash_data *)ia64_thash(ifa); - head->etag = INVALID_TI_TAG; - ia64_mf(); - head->page_flags = pte & ~PAGE_FLAGS_RV_MASK; - head->itir = rr.ps << 2; - head->etag = ia64_ttag(ifa); - head->gpaddr = gpaddr; -} - -void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps) -{ - u64 i, dirty_pages = 1; - u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; - vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); - void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE; - - dirty_pages <<= ps <= PAGE_SHIFT ? 
0 : ps - PAGE_SHIFT; - - vmm_spin_lock(lock); - for (i = 0; i < dirty_pages; i++) { - /* avoid RMW */ - if (!test_bit(base_gfn + i, dirty_bitmap)) - set_bit(base_gfn + i , dirty_bitmap); - } - vmm_spin_unlock(lock); -} - -void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) -{ - u64 phy_pte, psr; - union ia64_rr mrr; - - mrr.val = ia64_get_rr(va); - phy_pte = translate_phy_pte(&pte, itir, va); - - if (itir_ps(itir) >= mrr.ps) { - vhpt_insert(phy_pte, itir, va, pte); - } else { - phy_pte &= ~PAGE_FLAGS_RV_MASK; - psr = ia64_clear_ic(); - ia64_itc(type, va, phy_pte, itir_ps(itir)); - paravirt_dv_serialize_data(); - ia64_set_psr(psr); - } - - if (!(pte&VTLB_PTE_IO)) - mark_pages_dirty(v, pte, itir_ps(itir)); -} - -/* - * vhpt lookup - */ -struct thash_data *vhpt_lookup(u64 va) -{ - struct thash_data *head; - u64 tag; - - head = (struct thash_data *)ia64_thash(va); - tag = ia64_ttag(va); - if (head->etag == tag) - return head; - return NULL; -} - -u64 guest_vhpt_lookup(u64 iha, u64 *pte) -{ - u64 ret; - struct thash_data *data; - - data = __vtr_lookup(current_vcpu, iha, D_TLB); - if (data != NULL) - thash_vhpt_insert(current_vcpu, data->page_flags, - data->itir, iha, D_TLB); - - asm volatile ("rsm psr.ic|psr.i;;" - "srlz.d;;" - "ld8.s r9=[%1];;" - "tnat.nz p6,p7=r9;;" - "(p6) mov %0=1;" - "(p6) mov r9=r0;" - "(p7) extr.u r9=r9,0,53;;" - "(p7) mov %0=r0;" - "(p7) st8 [%2]=r9;;" - "ssm psr.ic;;" - "srlz.d;;" - "ssm psr.i;;" - "srlz.d;;" - : "=&r"(ret) : "r"(iha), "r"(pte) : "memory"); - - return ret; -} - -/* - * purge software guest tlb - */ - -static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps) -{ - struct thash_data *cur; - u64 start, curadr, size, psbits, tag, rr_ps, num; - union ia64_rr vrr; - struct thash_cb *hcb = &v->arch.vtlb; - - vrr.val = vcpu_get_rr(v, va); - psbits = VMX(v, psbits[(va >> 61)]); - start = va & ~((1UL << ps) - 1); - while (psbits) { - curadr = start; - rr_ps = __ffs(psbits); - psbits &= ~(1UL << rr_ps); - num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps)); - size = PSIZE(rr_ps); - vrr.ps = rr_ps; - while (num) { - cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag); - if (cur->etag == tag && cur->ps == rr_ps) - cur->etag = INVALID_TI_TAG; - curadr += size; - num--; - } - } -} - - -/* - * purge VHPT and machine TLB - */ -static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps) -{ - struct thash_data *cur; - u64 start, size, tag, num; - union ia64_rr rr; - - start = va & ~((1UL << ps) - 1); - rr.val = ia64_get_rr(va); - size = PSIZE(rr.ps); - num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps)); - while (num) { - cur = (struct thash_data *)ia64_thash(start); - tag = ia64_ttag(start); - if (cur->etag == tag) - cur->etag = INVALID_TI_TAG; - start += size; - num--; - } - machine_tlb_purge(va, ps); -} - -/* - * Insert an entry into hash TLB or VHPT. - * NOTES: - * 1: When inserting VHPT to thash, "va" is a must covered - * address by the inserted machine VHPT entry. - * 2: The format of entry is always in TLB. - * 3: The caller need to make sure the new entry will not overlap - * with any existed entry. 
- */ -void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va) -{ - struct thash_data *head; - union ia64_rr vrr; - u64 tag; - struct thash_cb *hcb = &v->arch.vtlb; - - vrr.val = vcpu_get_rr(v, va); - vrr.ps = itir_ps(itir); - VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps); - head = vsa_thash(hcb->pta, va, vrr.val, &tag); - head->page_flags = pte; - head->itir = itir; - head->etag = tag; -} - -int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type) -{ - struct thash_data *trp; - int i; - u64 end, rid; - - rid = vcpu_get_rr(vcpu, va); - rid = rid & RR_RID_MASK; - end = va + PSIZE(ps); - if (type == D_TLB) { - if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { - for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; - i < NDTRS; i++, trp++) { - if (__is_tr_overlap(trp, rid, va, end)) - return i; - } - } - } else { - if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { - for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; - i < NITRS; i++, trp++) { - if (__is_tr_overlap(trp, rid, va, end)) - return i; - } - } - } - return -1; -} - -/* - * Purge entries in VTLB and VHPT - */ -void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps) -{ - if (vcpu_quick_region_check(v->arch.tc_regions, va)) - vtlb_purge(v, va, ps); - vhpt_purge(v, va, ps); -} - -void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps) -{ - u64 old_va = va; - va = REGION_OFFSET(va); - if (vcpu_quick_region_check(v->arch.tc_regions, old_va)) - vtlb_purge(v, va, ps); - vhpt_purge(v, va, ps); -} - -u64 translate_phy_pte(u64 *pte, u64 itir, u64 va) -{ - u64 ps, ps_mask, paddr, maddr, io_mask; - union pte_flags phy_pte; - - ps = itir_ps(itir); - ps_mask = ~((1UL << ps) - 1); - phy_pte.val = *pte; - paddr = *pte; - paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask); - maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT); - io_mask = maddr & GPFN_IO_MASK; - if (io_mask && (io_mask != GPFN_PHYS_MMIO)) { - *pte |= VTLB_PTE_IO; - return -1; - } - maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | - (paddr & ~PAGE_MASK); - phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT; - return phy_pte.val; -} - -/* - * Purge overlap TCs and then insert the new entry to emulate itc ops. - * Notes: Only TC entry can purge and insert. - */ -void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, - u64 ifa, int type) -{ - u64 ps; - u64 phy_pte, io_mask, index; - union ia64_rr vrr, mrr; - - ps = itir_ps(itir); - vrr.val = vcpu_get_rr(v, ifa); - mrr.val = ia64_get_rr(ifa); - - index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; - io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK; - phy_pte = translate_phy_pte(&pte, itir, ifa); - - /* Ensure WB attribute if pte is related to a normal mem page, - * which is required by vga acceleration since qemu maps shared - * vram buffer with WB. 
- */ - if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) && - io_mask != GPFN_PHYS_MMIO) { - pte &= ~_PAGE_MA_MASK; - phy_pte &= ~_PAGE_MA_MASK; - } - - vtlb_purge(v, ifa, ps); - vhpt_purge(v, ifa, ps); - - if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) { - vtlb_insert(v, pte, itir, ifa); - vcpu_quick_region_set(VMX(v, tc_regions), ifa); - } - if (pte & VTLB_PTE_IO) - return; - - if (ps >= mrr.ps) - vhpt_insert(phy_pte, itir, ifa, pte); - else { - u64 psr; - phy_pte &= ~PAGE_FLAGS_RV_MASK; - psr = ia64_clear_ic(); - ia64_itc(type, ifa, phy_pte, ps); - paravirt_dv_serialize_data(); - ia64_set_psr(psr); - } - if (!(pte&VTLB_PTE_IO)) - mark_pages_dirty(v, pte, ps); - -} - -/* - * Purge all TCs or VHPT entries including those in Hash table. - * - */ - -void thash_purge_all(struct kvm_vcpu *v) -{ - int i; - struct thash_data *head; - struct thash_cb *vtlb, *vhpt; - vtlb = &v->arch.vtlb; - vhpt = &v->arch.vhpt; - - for (i = 0; i < 8; i++) - VMX(v, psbits[i]) = 0; - - head = vtlb->hash; - for (i = 0; i < vtlb->num; i++) { - head->page_flags = 0; - head->etag = INVALID_TI_TAG; - head->itir = 0; - head->next = 0; - head++; - }; - - head = vhpt->hash; - for (i = 0; i < vhpt->num; i++) { - head->page_flags = 0; - head->etag = INVALID_TI_TAG; - head->itir = 0; - head->next = 0; - head++; - }; - - local_flush_tlb_all(); -} - -/* - * Lookup the hash table and its collision chain to find an entry - * covering this address rid:va or the entry. - * - * INPUT: - * in: TLB format for both VHPT & TLB. - */ -struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) -{ - struct thash_data *cch; - u64 psbits, ps, tag; - union ia64_rr vrr; - - struct thash_cb *hcb = &v->arch.vtlb; - - cch = __vtr_lookup(v, va, is_data); - if (cch) - return cch; - - if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0) - return NULL; - - psbits = VMX(v, psbits[(va >> 61)]); - vrr.val = vcpu_get_rr(v, va); - while (psbits) { - ps = __ffs(psbits); - psbits &= ~(1UL << ps); - vrr.ps = ps; - cch = vsa_thash(hcb->pta, va, vrr.val, &tag); - if (cch->etag == tag && cch->ps == ps) - return cch; - } - - return NULL; -} - -/* - * Initialize internal control data before service. - */ -void thash_init(struct thash_cb *hcb, u64 sz) -{ - int i; - struct thash_data *head; - - hcb->pta.val = (unsigned long)hcb->hash; - hcb->pta.vf = 1; - hcb->pta.ve = 1; - hcb->pta.size = sz; - head = hcb->hash; - for (i = 0; i < hcb->num; i++) { - head->page_flags = 0; - head->itir = 0; - head->etag = INVALID_TI_TAG; - head->next = 0; - head++; - } -} - -u64 kvm_get_mpt_entry(u64 gpfn) -{ - u64 *base = (u64 *) KVM_P2M_BASE; - - if (gpfn >= (KVM_P2M_SIZE >> 3)) - panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn); - - return *(base + gpfn); -} - -u64 kvm_lookup_mpa(u64 gpfn) -{ - u64 maddr; - maddr = kvm_get_mpt_entry(gpfn); - return maddr&_PAGE_PPN_MASK; -} - -u64 kvm_gpa_to_mpa(u64 gpa) -{ - u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT); - return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK); -} - -/* - * Fetch guest bundle code. - * INPUT: - * gip: guest ip - * pbundle: used to return fetched bundle. 
- */ -int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle) -{ - u64 gpip = 0; /* guest physical IP*/ - u64 *vpa; - struct thash_data *tlb; - u64 maddr; - - if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) { - /* I-side physical mode */ - gpip = gip; - } else { - tlb = vtlb_lookup(vcpu, gip, I_TLB); - if (tlb) - gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | - (gip & (PSIZE(tlb->ps) - 1)); - } - if (gpip) { - maddr = kvm_gpa_to_mpa(gpip); - } else { - tlb = vhpt_lookup(gip); - if (tlb == NULL) { - ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2); - return IA64_FAULT; - } - maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) - | (gip & (PSIZE(tlb->ps) - 1)); - } - vpa = (u64 *)__kvm_va(maddr); - - pbundle->i64[0] = *vpa++; - pbundle->i64[1] = *vpa; - - return IA64_NO_FAULT; -} - -void kvm_init_vhpt(struct kvm_vcpu *v) -{ - v->arch.vhpt.num = VHPT_NUM_ENTRIES; - thash_init(&v->arch.vhpt, VHPT_SHIFT); - ia64_set_pta(v->arch.vhpt.pta.val); - /*Enable VHPT here?*/ -} - -void kvm_init_vtlb(struct kvm_vcpu *v) -{ - v->arch.vtlb.num = VTLB_NUM_ENTRIES; - thash_init(&v->arch.vtlb, VTLB_SHIFT); -} diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 0ba4057d271b..f0f7ef82b7a6 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -586,11 +586,6 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, case IOAPIC_REG_WINDOW: ioapic_write_indirect(ioapic, data); break; -#ifdef CONFIG_IA64 - case IOAPIC_REG_EOI: - __kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG); - break; -#endif default: break; diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h index 31725a3a93b8..dc3baa3a538f 100644 --- a/virt/kvm/ioapic.h +++ b/virt/kvm/ioapic.h @@ -19,7 +19,6 @@ struct kvm_vcpu; /* Direct registers. */ #define IOAPIC_REG_SELECT 0x00 #define IOAPIC_REG_WINDOW 0x10 -#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */ /* Indirect registers. 
*/ #define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */ diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index 963b8995a9e8..1345bde064f5 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -26,9 +26,6 @@ #include #include -#ifdef CONFIG_IA64 -#include -#endif #include "irq.h" @@ -57,12 +54,7 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq) { -#ifdef CONFIG_IA64 - return irq->delivery_mode == - (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT); -#else return irq->delivery_mode == APIC_DM_LOWEST; -#endif } int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, @@ -346,20 +338,6 @@ static const struct kvm_irq_routing_entry default_routing[] = { ROUTING_ENTRY1(18), ROUTING_ENTRY1(19), ROUTING_ENTRY1(20), ROUTING_ENTRY1(21), ROUTING_ENTRY1(22), ROUTING_ENTRY1(23), -#ifdef CONFIG_IA64 - ROUTING_ENTRY1(24), ROUTING_ENTRY1(25), - ROUTING_ENTRY1(26), ROUTING_ENTRY1(27), - ROUTING_ENTRY1(28), ROUTING_ENTRY1(29), - ROUTING_ENTRY1(30), ROUTING_ENTRY1(31), - ROUTING_ENTRY1(32), ROUTING_ENTRY1(33), - ROUTING_ENTRY1(34), ROUTING_ENTRY1(35), - ROUTING_ENTRY1(36), ROUTING_ENTRY1(37), - ROUTING_ENTRY1(38), ROUTING_ENTRY1(39), - ROUTING_ENTRY1(40), ROUTING_ENTRY1(41), - ROUTING_ENTRY1(42), ROUTING_ENTRY1(43), - ROUTING_ENTRY1(44), ROUTING_ENTRY1(45), - ROUTING_ENTRY1(46), ROUTING_ENTRY1(47), -#endif }; int kvm_setup_default_irq_routing(struct kvm *kvm) -- cgit v1.2.3-59-g8ed1b From 5bfb937c6ef91af4cfb4ec432c2eb6767bab97b0 Mon Sep 17 00:00:00 2001 From: Kukjin Kim Date: Fri, 14 Nov 2014 16:54:57 +0900 Subject: MAINTAINERS: update email address and cleanup for exynos entry Use kernel.org account instead of samsung.com and cleanup for Samsung s3c, s5p and exynos SoCs. Signed-off-by: Kukjin Kim Acked-by: Ben Dooks Signed-off-by: Arnd Bergmann --- MAINTAINERS | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 78ed8438d193..bebc46ee684d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1308,30 +1308,22 @@ F: drivers/*/*rockchip* F: drivers/*/*/*rockchip* F: sound/soc/rockchip/ -ARM/SAMSUNG ARM ARCHITECTURES -M: Ben Dooks -M: Kukjin Kim +ARM/SAMSUNG EXYNOS ARM ARCHITECTURES +M: Kukjin Kim L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) -W: http://www.fluff.org/ben/linux/ S: Maintained F: arch/arm/boot/dts/s3c* F: arch/arm/boot/dts/exynos* F: arch/arm/plat-samsung/ F: arch/arm/mach-s3c24*/ F: arch/arm/mach-s3c64xx/ +F: arch/arm/mach-s5p*/ +F: arch/arm/mach-exynos*/ F: drivers/*/*s3c2410* F: drivers/*/*/*s3c2410* F: drivers/spi/spi-s3c* F: sound/soc/samsung/* - -ARM/S5P EXYNOS ARM ARCHITECTURES -M: Kukjin Kim -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) -S: Maintained -F: arch/arm/mach-s5p*/ -F: arch/arm/mach-exynos*/ N: exynos ARM/SAMSUNG MOBILE MACHINE SUPPORT -- cgit v1.2.3-59-g8ed1b From 3b4b6fe94edbef8a356d4e5e59ec00e9b5f99e15 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 14 Nov 2014 12:53:43 -0800 Subject: MAINTAINERS: update brcmstb entries Add Gregory Fong and myself as maintainers for the brcmstb platform along with Brian Norris and Marc Carino. 
Signed-off-by: Florian Fainelli Signed-off-by: Arnd Bergmann --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index bebc46ee684d..7bbca94e4490 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2087,6 +2087,8 @@ F: arch/arm/include/debug/bcm63xx.S BROADCOM BCM7XXX ARM ARCHITECTURE M: Marc Carino M: Brian Norris +M: Gregory Fong +M: Florian Fainelli L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-bcm/*brcmstb* -- cgit v1.2.3-59-g8ed1b From e36661e48baad30e7fd1f9f652a64d7c26cffd04 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 14 Nov 2014 12:53:44 -0800 Subject: MAINTAINERS: add entry for the GISB arbiter driver The GISB bus arbiter driver is relevant for BCM7xxx (brcmstb) platforms, add an entry to cover this file. Signed-off-by: Florian Fainelli Signed-off-by: Arnd Bergmann --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 7bbca94e4490..d45abb11cfd1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2093,6 +2093,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-bcm/*brcmstb* F: arch/arm/boot/dts/bcm7*.dts* +F: drivers/bus/brcmstb_gisb.c BROADCOM TG3 GIGABIT ETHERNET DRIVER M: Prashant Sreedharan -- cgit v1.2.3-59-g8ed1b From ef94b1864d1ed5be54376404bb23d22ed0481feb Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 20 Nov 2014 16:39:59 +0100 Subject: ovl: rename filesystem type to "overlay" Some distributions carry an "old" format of overlayfs while mainline has a "new" format. The distros will possibly want to keep the old overlayfs alongside the new for compatibility reasons. To make it possible to differentiate the two versions change the name of the new one from "overlayfs" to "overlay". Signed-off-by: Miklos Szeredi Reported-by: Serge Hallyn Cc: Andy Whitcroft --- Documentation/filesystems/overlayfs.txt | 2 +- MAINTAINERS | 2 +- fs/Makefile | 2 +- fs/overlayfs/Kconfig | 2 +- fs/overlayfs/Makefile | 4 ++-- fs/overlayfs/super.c | 6 +++--- 6 files changed, 9 insertions(+), 9 deletions(-) (limited to 'MAINTAINERS') diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index 530850a72735..a27c950ece61 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -64,7 +64,7 @@ is formed. 
At mount time, the two directories given as mount options "lowerdir" and "upperdir" are combined into a merged directory: - mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper,\ + mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,\ workdir=/work /merged The "workdir" needs to be an empty directory on the same filesystem diff --git a/MAINTAINERS b/MAINTAINERS index c444907ccd69..af1e738e8772 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6888,7 +6888,7 @@ F: drivers/scsi/osd/ F: include/scsi/osd_* F: fs/exofs/ -OVERLAYFS FILESYSTEM +OVERLAY FILESYSTEM M: Miklos Szeredi L: linux-fsdevel@vger.kernel.org S: Supported diff --git a/fs/Makefile b/fs/Makefile index 34a1b9dea6dd..da0bbb456d3f 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -104,7 +104,7 @@ obj-$(CONFIG_QNX6FS_FS) += qnx6/ obj-$(CONFIG_AUTOFS4_FS) += autofs4/ obj-$(CONFIG_ADFS_FS) += adfs/ obj-$(CONFIG_FUSE_FS) += fuse/ -obj-$(CONFIG_OVERLAYFS_FS) += overlayfs/ +obj-$(CONFIG_OVERLAY_FS) += overlayfs/ obj-$(CONFIG_UDF_FS) += udf/ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ obj-$(CONFIG_OMFS_FS) += omfs/ diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig index e60125976873..34355818a2e0 100644 --- a/fs/overlayfs/Kconfig +++ b/fs/overlayfs/Kconfig @@ -1,4 +1,4 @@ -config OVERLAYFS_FS +config OVERLAY_FS tristate "Overlay filesystem support" help An overlay filesystem combines two filesystems - an 'upper' filesystem diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile index 8f91889480d0..900daed3e91d 100644 --- a/fs/overlayfs/Makefile +++ b/fs/overlayfs/Makefile @@ -2,6 +2,6 @@ # Makefile for the overlay filesystem. # -obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o +obj-$(CONFIG_OVERLAY_FS) += overlay.o -overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o +overlay-objs := super.o inode.o dir.o readdir.o copy_up.o diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 08b704cebfc4..b92bd1829cf7 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -24,7 +24,7 @@ MODULE_AUTHOR("Miklos Szeredi "); MODULE_DESCRIPTION("Overlay filesystem"); MODULE_LICENSE("GPL"); -#define OVERLAYFS_SUPER_MAGIC 0x794c764f +#define OVERLAYFS_SUPER_MAGIC 0x794c7630 struct ovl_config { char *lowerdir; @@ -776,11 +776,11 @@ static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags, static struct file_system_type ovl_fs_type = { .owner = THIS_MODULE, - .name = "overlayfs", + .name = "overlay", .mount = ovl_mount, .kill_sb = kill_anon_super, }; -MODULE_ALIAS_FS("overlayfs"); +MODULE_ALIAS_FS("overlay"); static int __init ovl_init(void) { -- cgit v1.2.3-59-g8ed1b From 1d113735ecf21de74a04c3b58fa106ac2e64ca0d Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 20 Nov 2014 16:40:01 +0100 Subject: ovl: update MAINTAINERS There's a union/overlay specific mailing list now. Also add a git tree. 
Signed-off-by: Miklos Szeredi --- MAINTAINERS | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index af1e738e8772..0ff630de8a6d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6890,9 +6890,10 @@ F: fs/exofs/ OVERLAY FILESYSTEM M: Miklos Szeredi -L: linux-fsdevel@vger.kernel.org +L: linux-unionfs@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git S: Supported -F: fs/overlayfs/* +F: fs/overlayfs/ F: Documentation/filesystems/overlayfs.txt P54 WIRELESS DRIVER -- cgit v1.2.3-59-g8ed1b From 9661975dded8d39fa320218a6396b8be0972bb2f Mon Sep 17 00:00:00 2001 From: Andrey Utkin Date: Mon, 17 Nov 2014 13:59:23 -0300 Subject: [media] Update MAINTAINERS for solo6x10 Signed-off-by: Andrey Utkin Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dab92a78d1d5..5af7cb642c41 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8641,7 +8641,9 @@ S: Maintained F: drivers/leds/leds-net48xx.c SOFTLOGIC 6x10 MPEG CODEC -M: Ismael Luceno +M: Bluecherry Maintainers +M: Andrey Utkin +M: Andrey Utkin L: linux-media@vger.kernel.org S: Supported F: drivers/media/pci/solo6x10/ -- cgit v1.2.3-59-g8ed1b From 009a5410337224896e300a570e185308836a2f14 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 21 Nov 2014 15:48:27 -0200 Subject: MAINTAINERS: Update mchehab's addresses I'm using the new Open Source Group address for my upstream work. While the other email is still valid, it is better for me to receive patches via the new address. So, replace it everywhere inside MAINTAINERS. Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 5af7cb642c41..629c5b127292 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1827,7 +1827,7 @@ F: include/net/ax25.h F: net/ax25/ AZ6007 DVB DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -2196,7 +2196,7 @@ F: Documentation/filesystems/btrfs.txt F: fs/btrfs/ BTTV VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -2717,7 +2717,7 @@ F: drivers/media/common/cx2341x* F: include/media/cx2341x* CX88 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -3386,7 +3386,7 @@ F: fs/ecryptfs/ EDAC-CORE M: Doug Thompson M: Borislav Petkov -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Supported @@ -3435,7 +3435,7 @@ S: Maintained F: drivers/edac/e7xxx_edac.c EDAC-GHES -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained @@ -3463,21 +3463,21 @@ S: Maintained F: drivers/edac/i5000_edac.c EDAC-I5400 -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained F: drivers/edac/i5400_edac.c EDAC-I7300 -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained F: drivers/edac/i7300_edac.c 
EDAC-I7CORE -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained @@ -3520,7 +3520,7 @@ S: Maintained F: drivers/edac/r82600_edac.c EDAC-SBRIDGE -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net S: Maintained @@ -3580,7 +3580,7 @@ S: Maintained F: drivers/net/ethernet/ibm/ehea/ EM28XX VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -5941,7 +5941,7 @@ S: Maintained F: drivers/media/radio/radio-maxiradio* MEDIA INPUT INFRASTRUCTURE (V4L/DVB) -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab P: LinuxTV.org Project L: linux-media@vger.kernel.org W: http://linuxtv.org @@ -7970,7 +7970,7 @@ S: Odd Fixes F: drivers/media/i2c/saa6588* SAA7134 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -8428,7 +8428,7 @@ S: Maintained F: drivers/media/radio/si4713/radio-usb-si4713.c SIANO DVB DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -9117,7 +9117,7 @@ S: Maintained F: drivers/media/i2c/tda9840* TEA5761 TUNER DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -9125,7 +9125,7 @@ S: Odd fixes F: drivers/media/tuners/tea5761.* TEA5767 TUNER DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -9437,7 +9437,7 @@ F: include/linux/shmem_fs.h F: mm/shmem.c TM6000 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git @@ -10264,7 +10264,7 @@ S: Maintained F: arch/x86/kernel/cpu/mcheck/* XC2028/3028 TUNER DRIVER -M: Mauro Carvalho Chehab +M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git -- cgit v1.2.3-59-g8ed1b From a2f6734c5f683afe5c1b7c23bd60e08e8ad719c4 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Mon, 20 Oct 2014 21:28:06 -0700 Subject: MAINTAINERS: Add entry for BCM33xx cable chips Add myself as a maintainer for the new BCM3384 board support code. 
Signed-off-by: Kevin Cernekee Cc: f.fainelli@gmail.com Cc: mbizon@freebox.fr Cc: jogo@openwrt.org Cc: jfraser@broadcom.com Cc: linux-mips@linux-mips.org Cc: devicetree@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8171/ Signed-off-by: Ralf Baechle --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0ff630de8a6d..42f0199083cf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2078,6 +2078,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git S: Maintained N: bcm2835 +BROADCOM BCM33XX MIPS ARCHITECTURE +M: Kevin Cernekee +L: linux-mips@linux-mips.org +S: Maintained +F: arch/mips/bcm3384/* +F: arch/mips/include/asm/mach-bcm3384/* +F: arch/mips/kernel/*bmips* + BROADCOM BCM5301X ARM ARCHITECTURE M: Hauke Mehrtens L: linux-arm-kernel@lists.infradead.org -- cgit v1.2.3-59-g8ed1b From 7110e227c86b83446b3b157df2ebb662c9fcb033 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Mon, 20 Oct 2014 21:28:07 -0700 Subject: MAINTAINERS: Add entry for bcm63xx/bcm33xx UDC gadget driver This hardware shows up on the newly-supported BCM3384 cable chip, as well as several old BCM63xx DSL chips. Signed-off-by: Kevin Cernekee Cc: f.fainelli@gmail.com Cc: mbizon@freebox.fr Cc: jogo@openwrt.org Cc: jfraser@broadcom.com Cc: linux-mips@linux-mips.org Cc: devicetree@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8172/ Signed-off-by: Ralf Baechle --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 42f0199083cf..680861287cb7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2102,6 +2102,12 @@ S: Maintained F: arch/arm/mach-bcm/bcm63xx.c F: arch/arm/include/debug/bcm63xx.S +BROADCOM BCM63XX/BCM33XX UDC DRIVER +M: Kevin Cernekee +L: linux-usb@vger.kernel.org +S: Maintained +F: drivers/usb/gadget/udc/bcm63xx_udc.* + BROADCOM BCM7XXX ARM ARCHITECTURE M: Marc Carino M: Brian Norris -- cgit v1.2.3-59-g8ed1b From 70371cef114ca295a890069e12c1def08bb300f7 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Sat, 15 Nov 2014 16:17:45 -0800 Subject: MAINTAINERS: Add entry for BMIPS multiplatform kernel Add myself as a maintainer for the new BMIPS target. 
Signed-off-by: Kevin Cernekee Cc: f.fainelli@gmail.com Cc: jfraser@broadcom.com Cc: dtor@chromium.org Cc: tglx@linutronix.de Cc: jason@lakedaemon.net Cc: linux-mips@linux-mips.org Cc: devicetree@vger.kernel.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8505/ Signed-off-by: Ralf Baechle --- MAINTAINERS | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 680861287cb7..eedc64bb1865 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2116,6 +2116,18 @@ S: Maintained F: arch/arm/mach-bcm/*brcmstb* F: arch/arm/boot/dts/bcm7*.dts* +BROADCOM BMIPS MIPS ARCHITECTURE +M: Kevin Cernekee +M: Florian Fainelli +L: linux-mips@linux-mips.org +S: Maintained +F: arch/mips/bmips/* +F: arch/mips/include/asm/mach-bmips/* +F: arch/mips/kernel/*bmips* +F: arch/mips/boot/dts/bcm*.dts* +F: drivers/irqchip/irq-bcm7* +F: drivers/irqchip/irq-brcmstb* + BROADCOM TG3 GIGABIT ETHERNET DRIVER M: Prashant Sreedharan M: Michael Chan -- cgit v1.2.3-59-g8ed1b From e399065be090b2d8abd70c72b9632df67ab0413f Mon Sep 17 00:00:00 2001 From: "Sumit.Saxena@avagotech.com" Date: Mon, 17 Nov 2014 15:24:03 +0530 Subject: megaraid_sas: update MAINTAINERS and copyright information for megaraid drivers Update MAINTAINERS list and copyright information for megaraid_sas driver. Signed-off-by: Sumit Saxena Reviewed-by: Tomas Henzl Signed-off-by: Christoph Hellwig --- MAINTAINERS | 9 ++++++--- drivers/scsi/megaraid/megaraid_sas.h | 16 +++++++++------- drivers/scsi/megaraid/megaraid_sas_base.c | 21 ++++++++++----------- drivers/scsi/megaraid/megaraid_sas_fp.c | 16 +++++++++------- drivers/scsi/megaraid/megaraid_sas_fusion.c | 18 ++++++++++-------- drivers/scsi/megaraid/megaraid_sas_fusion.h | 16 +++++++++------- 6 files changed, 53 insertions(+), 43 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index d206f3779306..1522777cfc66 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5990,10 +5990,13 @@ W: http://linuxtv.org S: Odd Fixes F: drivers/media/parport/pms* -MEGARAID SCSI DRIVERS -M: Neela Syam Kolli +MEGARAID SCSI/SAS DRIVERS +M: Kashyap Desai +M: Sumit Saxena +M: Uday Lingala +L: megaraidlinux.pdl@avagotech.com L: linux-scsi@vger.kernel.org -W: http://megaraid.lsilogic.com +W: http://www.lsi.com S: Maintained F: Documentation/scsi/megaraid.txt F: drivers/scsi/megaraid.* diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index fe546e65dc11..401f4a22b370 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2003-2012 LSI Corporation. + * Copyright (c) 2003-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,17 +15,18 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see . 
* * FILE: megaraid_sas.h * - * Authors: LSI Corporation + * Authors: Avago Technologies + * Kashyap Desai + * Sumit Saxena * - * Send feedback to: + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #ifndef LSI_MEGARAID_SAS_H diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index bf97f3b6494b..7018ec47a43f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2003-2012 LSI Corporation. + * Copyright (c) 2003-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,22 +15,20 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see . * - * FILE: megaraid_sas_base.c - * Version : 06.805.06.00-rc1 - * - * Authors: LSI Corporation + * Authors: Avago Technologies * Sreenivas Bagalkote * Sumant Patro * Bo Yang - * Adam Radford + * Adam Radford + * Kashyap Desai + * Sumit Saxena * - * Send feedback to: + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #include diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 685e6f391fe4..246574bad910 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2009-2012 LSI Corporation. + * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,20 +15,21 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see . * * FILE: megaraid_sas_fp.c * - * Authors: LSI Corporation + * Authors: Avago Technologies * Sumant Patro * Varad Talamacki * Manoj Jose + * Kashyap Desai + * Sumit Saxena * - * Send feedback to: + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #include diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index f37eed682c75..92f33548e924 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2009-2012 LSI Corporation. 
+ * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,19 +15,20 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see . * * FILE: megaraid_sas_fusion.c * - * Authors: LSI Corporation + * Authors: Avago Technologies * Sumant Patro - * Adam Radford + * Adam Radford + * Kashyap Desai + * Sumit Saxena * - * Send feedback to: + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #include diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 0d183d521bdd..92ecd39373bb 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2009-2012 LSI Corporation. + * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,19 +15,20 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see . * * FILE: megaraid_sas_fusion.h * - * Authors: LSI Corporation + * Authors: Avago Technologies * Manoj Jose * Sumant Patro + * Kashyap Desai + * Sumit Saxena * - * Send feedback to: + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #ifndef _MEGARAID_SAS_FUSION_H_ -- cgit v1.2.3-59-g8ed1b From 71bd849dbac99b587cb8a2d34061ecb6abe77c84 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 24 Nov 2014 15:37:30 +0100 Subject: tmscsim: replace by am53c974 driver The am53c974 is a re-implementation of the tmscsim driver, and provides the same functionality. So remove the tmscsim driver and make am53c974 an alias to tmscsim. 
Acked-by: Guennadi Liakhovetski Signed-off-by: Hannes Reinecke Signed-off-by: Christoph Hellwig --- MAINTAINERS | 7 +- drivers/scsi/Kconfig | 16 - drivers/scsi/Makefile | 1 - drivers/scsi/am53c974.c | 1 + drivers/scsi/tmscsim.c | 2626 ----------------------------------------------- drivers/scsi/tmscsim.h | 549 ---------- 6 files changed, 4 insertions(+), 3196 deletions(-) delete mode 100644 drivers/scsi/tmscsim.c delete mode 100644 drivers/scsi/tmscsim.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1522777cfc66..921090848a29 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2862,11 +2862,10 @@ F: Documentation/networking/dmfe.txt F: drivers/net/ethernet/dec/tulip/dmfe.c DC390/AM53C974 SCSI driver -M: Kurt Garloff -W: http://www.garloff.de/kurt/linux/dc390/ -M: Guennadi Liakhovetski +M: Hannes Reinecke +L: linux-scsi@vger.kernel.org S: Maintained -F: drivers/scsi/tmscsim.* +F: drivers/scsi/am53c974.c DC395x SCSI driver M: Oliver Neukum diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 519c3ef72fd5..f871a80f38ee 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1341,22 +1341,6 @@ config SCSI_DC395x To compile this driver as a module, choose M here: the module will be called dc395x. -config SCSI_DC390T - tristate "Tekram DC390(T) and Am53/79C974 SCSI support" - depends on PCI && SCSI - ---help--- - This driver supports PCI SCSI host adapters based on the Am53C974A - chip, e.g. Tekram DC390(T), DawiControl 2974 and some onboard - PCscsi/PCnet (Am53/79C974) solutions. - - Documentation can be found in . - - Note that this driver does NOT support Tekram DC390W/U/F, which are - based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those. - - To compile this driver as a module, choose M here: the - module will be called tmscsim. - config SCSI_AM53C974 tristate "Tekram DC390(T) and Am53/79C974 SCSI support (new driver)" depends on PCI && SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 79a6571e33f9..a9f3fa857610 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -100,7 +100,6 @@ obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o obj-$(CONFIG_SCSI_7000FASST) += wd7000.o obj-$(CONFIG_SCSI_EATA) += eata.o obj-$(CONFIG_SCSI_DC395x) += dc395x.o -obj-$(CONFIG_SCSI_DC390T) += tmscsim.o obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c index 25f06196643e..aa3e2c7cd83c 100644 --- a/drivers/scsi/am53c974.c +++ b/drivers/scsi/am53c974.c @@ -574,6 +574,7 @@ MODULE_DESCRIPTION("AM53C974 SCSI driver"); MODULE_AUTHOR("Hannes Reinecke "); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_ALIAS("tmscsim"); module_param(am53c974_debug, bool, 0644); MODULE_PARM_DESC(am53c974_debug, "Enable debugging"); diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c deleted file mode 100644 index 6c3c2cef3891..000000000000 --- a/drivers/scsi/tmscsim.c +++ /dev/null @@ -1,2626 +0,0 @@ -/************************************************************************ - * FILE NAME : TMSCSIM.C * - * BY : C.L. Huang, ching@tekram.com.tw * - * Description: Device Driver for Tekram DC-390(T) PCI SCSI * - * Bus Master Host Adapter * - * (C)Copyright 1995-1996 Tekram Technology Co., Ltd. 
* - ************************************************************************ - * (C) Copyright: put under GNU GPL in 10/96 * - * (see Documentation/scsi/tmscsim.txt) * - ************************************************************************ - * $Id: tmscsim.c,v 2.60.2.30 2000/12/20 01:07:12 garloff Exp $ * - * Enhancements and bugfixes by * - * Kurt Garloff * - ************************************************************************ - * HISTORY: * - * * - * REV# DATE NAME DESCRIPTION * - * 1.00 96/04/24 CLH First release * - * 1.01 96/06/12 CLH Fixed bug of Media Change for Removable * - * Device, scan all LUN. Support Pre2.0.10 * - * 1.02 96/06/18 CLH Fixed bug of Command timeout ... * - * 1.03 96/09/25 KG Added tmscsim_proc_info() * - * 1.04 96/10/11 CLH Updating for support KV 2.0.x * - * 1.05 96/10/18 KG Fixed bug in DC390_abort(null ptr deref)* - * 1.06 96/10/25 KG Fixed module support * - * 1.07 96/11/09 KG Fixed tmscsim_proc_info() * - * 1.08 96/11/18 KG Fixed null ptr in DC390_Disconnect() * - * 1.09 96/11/30 KG Added register the allocated IO space * - * 1.10 96/12/05 CLH Modified tmscsim_proc_info(), and reset * - * pending interrupt in DC390_detect() * - * 1.11 97/02/05 KG/CLH Fixeds problem with partitions greater * - * than 1GB * - * 1.12 98/02/15 MJ Rewritten PCI probing * - * 1.13 98/04/08 KG Support for non DC390, __initfunc decls,* - * changed max devs from 10 to 16 * - * 1.14a 98/05/05 KG Dynamic DCB allocation, add-single-dev * - * for LUNs if LUN_SCAN (BIOS) not set * - * runtime config using /proc interface * - * 1.14b 98/05/06 KG eliminated cli (); sti (); spinlocks * - * 1.14c 98/05/07 KG 2.0.x compatibility * - * 1.20a 98/05/07 KG changed names of funcs to be consistent * - * DC390_ (entry points), dc390_ (internal)* - * reworked locking * - * 1.20b 98/05/12 KG bugs: version, kfree, _ctmp * - * debug output * - * 1.20c 98/05/12 KG bugs: kfree, parsing, EEpromDefaults * - * 1.20d 98/05/14 KG bugs: list linkage, clear flag after * - * reset on startup, code cleanup * - * 1.20e 98/05/15 KG spinlock comments, name space cleanup * - * pLastDCB now part of ACB structure * - * added stats, timeout for 2.1, TagQ bug * - * RESET and INQUIRY interface commands * - * 1.20f 98/05/18 KG spinlocks fixes, max_lun fix, free DCBs * - * for missing LUNs, pending int * - * 1.20g 98/05/19 KG Clean up: Avoid short * - * 1.20h 98/05/21 KG Remove AdaptSCSIID, max_lun ... * - * 1.20i 98/05/21 KG Aiiie: Bug with TagQMask * - * 1.20j 98/05/24 KG Handle STAT_BUSY, handle pACB->pLinkDCB * - * == 0 in remove_dev and DoingSRB_Done * - * 1.20k 98/05/25 KG DMA_INT (experimental) * - * 1.20l 98/05/27 KG remove DMA_INT; DMA_IDLE cmds added; * - * 1.20m 98/06/10 KG glitch configurable; made some global * - * vars part of ACB; use DC390_readX * - * 1.20n 98/06/11 KG startup params * - * 1.20o 98/06/15 KG added TagMaxNum to boot/module params * - * Device Nr -> Idx, TagMaxNum power of 2 * - * 1.20p 98/06/17 KG Docu updates. Reset depends on settings * - * pci_set_master added; 2.0.xx: pcibios_* * - * used instead of MechNum things ... * - * 1.20q 98/06/23 KG Changed defaults. Added debug code for * - * removable media and fixed it. TagMaxNum * - * fixed for DC390. Locking: ACB, DRV for * - * better IRQ sharing. Spelling: Queueing * - * Parsing and glitch_cfg changes. Display * - * real SyncSpeed value. Made DisConn * - * functional (!) * - * 1.20r 98/06/30 KG Debug macros, allow disabling DsCn, set * - * BIT4 in CtrlR4, EN_PAGE_INT, 2.0 module * - * param -1 fixed. 
* - * 1.20s 98/08/20 KG Debug info on abort(), try to check PCI,* - * phys_to_bus instead of phys_to_virt, * - * fixed sel. process, fixed locking, * - * added MODULE_XXX infos, changed IRQ * - * request flags, disable DMA_INT * - * 1.20t 98/09/07 KG TagQ report fixed; Write Erase DMA Stat;* - * initfunc -> __init; better abort; * - * Timeout for XFER_DONE & BLAST_COMPLETE; * - * Allow up to 33 commands being processed * - * 2.0a 98/10/14 KG Max Cmnds back to 17. DMA_Stat clearing * - * all flags. Clear within while() loops * - * in DataIn_0/Out_0. Null ptr in dumpinfo * - * for pSRB==0. Better locking during init.* - * bios_param() now respects part. table. * - * 2.0b 98/10/24 KG Docu fixes. Timeout Msg in DMA Blast. * - * Disallow illegal idx in INQUIRY/REMOVE * - * 2.0c 98/11/19 KG Cleaned up detect/init for SMP boxes, * - * Write Erase DMA (1.20t) caused problems * - * 2.0d 98/12/25 KG Christmas release ;-) Message handling * - * completely reworked. Handle target ini- * - * tiated SDTR correctly. * - * 2.0d1 99/01/25 KG Try to handle RESTORE_PTR * - * 2.0d2 99/02/08 KG Check for failure of kmalloc, correct * - * inclusion of scsicam.h, DelayReset * - * 2.0d3 99/05/31 KG DRIVER_OK -> DID_OK, DID_NO_CONNECT, * - * detect Target mode and warn. * - * pcmd->result handling cleaned up. * - * 2.0d4 99/06/01 KG Cleaned selection process. Found bug * - * which prevented more than 16 tags. Now: * - * 24. SDTR cleanup. Cleaner multi-LUN * - * handling. Don't modify ControlRegs/FIFO * - * when connected. * - * 2.0d5 99/06/01 KG Clear DevID, Fix INQUIRY after cfg chg. * - * 2.0d6 99/06/02 KG Added ADD special command to allow cfg. * - * before detection. Reset SYNC_NEGO_DONE * - * after a bus reset. * - * 2.0d7 99/06/03 KG Fixed bugs wrt add,remove commands * - * 2.0d8 99/06/04 KG Removed copying of cmnd into CmdBlock. * - * Fixed Oops in _release(). * - * 2.0d9 99/06/06 KG Also tag queue INQUIRY, T_U_R, ... * - * Allow arb. no. of Tagged Cmnds. Max 32 * - * 2.0d1099/06/20 KG TagMaxNo changes now honoured! Queueing * - * clearified (renamed ..) TagMask handling* - * cleaned. * - * 2.0d1199/06/28 KG cmd->result now identical to 2.0d2 * - * 2.0d1299/07/04 KG Changed order of processing in IRQ * - * 2.0d1399/07/05 KG Don't update DCB fields if removed * - * 2.0d1499/07/05 KG remove_dev: Move kfree() to the end * - * 2.0d1599/07/12 KG use_new_eh_code: 0, ULONG -> UINT where * - * appropriate * - * 2.0d1699/07/13 KG Reenable StartSCSI interrupt, Retry msg * - * 2.0d1799/07/15 KG Remove debug msg. Disable recfg. when * - * there are queued cmnds * - * 2.0d1899/07/18 KG Selection timeout: Don't requeue * - * 2.0d1999/07/18 KG Abort: Only call scsi_done if dequeued * - * 2.0d2099/07/19 KG Rst_Detect: DoingSRB_Done * - * 2.0d2199/08/15 KG dev_id for request/free_irq, cmnd[0] for* - * RETRY, SRBdone does DID_ABORT for the * - * cmd passed by DC390_reset() * - * 2.0d2299/08/25 KG dev_id fixed. can_queue: 42 * - * 2.0d2399/08/25 KG Removed some debugging code. dev_id * - * now is set to pACB. Use u8,u16,u32. * - * 2.0d2499/11/14 KG Unreg. I/O if failed IRQ alloc. Call * - * done () w/ DID_BAD_TARGET in case of * - * missing DCB. We are old EH!! * - * 2.0d2500/01/15 KG 2.3.3x compat from Andreas Schultz * - * set unique_id. Disable RETRY message. * - * 2.0d2600/01/29 KG Go to new EH. * - * 2.0d2700/01/31 KG ... but maintain 2.0 compat. 
* - * and fix DCB freeing * - * 2.0d2800/02/14 KG Queue statistics fixed, dump special cmd* - * Waiting_Timer for failed StartSCSI * - * New EH: Don't return cmnds to ML on RST * - * Use old EH (don't have new EH fns yet) * - * Reset: Unlock, but refuse to queue * - * 2.3 __setup function * - * 2.0e 00/05/22 KG Return residual for 2.3 * - * 2.0e1 00/05/25 KG Compile fixes for 2.3.99 * - * 2.0e2 00/05/27 KG Jeff Garzik's pci_enable_device() * - * 2.0e3 00/09/29 KG Some 2.4 changes. Don't try Sync Nego * - * before INQUIRY has reported ability. * - * Recognise INQUIRY as scanning command. * - * 2.0e4 00/10/13 KG Allow compilation into 2.4 kernel * - * 2.0e5 00/11/17 KG Store Inq.flags in DCB * - * 2.0e6 00/11/22 KG 2.4 init function (Thx to O.Schumann) * - * 2.4 PCI device table (Thx to A.Richter) * - * 2.0e7 00/11/28 KG Allow overriding of BIOS settings * - * 2.0f 00/12/20 KG Handle failed INQUIRYs during scan * - * 2.1a 03/11/29 GL, KG Initial fixing for 2.6. Convert to * - * use the current PCI-mapping API, update * - * command-queuing. * - * 2.1b 04/04/13 GL Fix for 64-bit platforms * - * 2.1b1 04/01/31 GL (applied 05.04) Remove internal * - * command-queuing. * - * 2.1b2 04/02/01 CH (applied 05.04) Fix error-handling * - * 2.1c 04/05/23 GL Update to use the new pci_driver API, * - * some scsi EH updates, more cleanup. * - * 2.1d 04/05/27 GL Moved setting of scan_devices to * - * slave_alloc/_configure/_destroy, as * - * suggested by CH. * - ***********************************************************************/ - -/* DEBUG options */ -//#define DC390_DEBUG0 -//#define DC390_DEBUG1 -//#define DC390_DCBDEBUG -//#define DC390_PARSEDEBUG -//#define DC390_REMOVABLEDEBUG -//#define DC390_LOCKDEBUG - -//#define NOP do{}while(0) -#define C_NOP - -/* Debug definitions */ -#ifdef DC390_DEBUG0 -# define DEBUG0(x) x -#else -# define DEBUG0(x) C_NOP -#endif -#ifdef DC390_DEBUG1 -# define DEBUG1(x) x -#else -# define DEBUG1(x) C_NOP -#endif -#ifdef DC390_DCBDEBUG -# define DCBDEBUG(x) x -#else -# define DCBDEBUG(x) C_NOP -#endif -#ifdef DC390_PARSEDEBUG -# define PARSEDEBUG(x) x -#else -# define PARSEDEBUG(x) C_NOP -#endif -#ifdef DC390_REMOVABLEDEBUG -# define REMOVABLEDEBUG(x) x -#else -# define REMOVABLEDEBUG(x) C_NOP -#endif -#define DCBDEBUG1(x) C_NOP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#define DC390_BANNER "Tekram DC390/AM53C974" -#define DC390_VERSION "2.1d 2004-05-27" - -#define PCI_DEVICE_ID_AMD53C974 PCI_DEVICE_ID_AMD_SCSI - -#include "tmscsim.h" - - -static void dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void 
dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); - -static void dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB ); -static void dc390_Disconnect( struct dc390_acb* pACB ); -static void dc390_Reselect( struct dc390_acb* pACB ); -static void dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB ); -static void dc390_ScsiRstDetect( struct dc390_acb* pACB ); -static void dc390_EnableMsgOut_Abort(struct dc390_acb*, struct dc390_srb*); -static void dc390_dumpinfo(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB); -static void dc390_ResetDevParam(struct dc390_acb* pACB); - -static u32 dc390_laststatus = 0; -static u8 dc390_adapterCnt = 0; - -static int disable_clustering; -module_param(disable_clustering, int, S_IRUGO); -MODULE_PARM_DESC(disable_clustering, "If you experience problems with your devices, try setting to 1"); - -/* Startup values, to be overriden on the commandline */ -static int tmscsim[] = {-2, -2, -2, -2, -2, -2}; - -module_param_array(tmscsim, int, NULL, 0); -MODULE_PARM_DESC(tmscsim, "Host SCSI ID, Speed (0=10MHz), Device Flags, Adapter Flags, Max Tags (log2(tags)-1), DelayReset (s)"); -MODULE_AUTHOR("C.L. Huang / Kurt Garloff"); -MODULE_DESCRIPTION("SCSI host adapter driver for Tekram DC390 and other AMD53C974A based PCI SCSI adapters"); -MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("sd,sr,sg,st"); - -static void *dc390_phase0[]={ - dc390_DataOut_0, - dc390_DataIn_0, - dc390_Command_0, - dc390_Status_0, - dc390_Nop_0, - dc390_Nop_0, - dc390_MsgOut_0, - dc390_MsgIn_0, - dc390_Nop_1 - }; - -static void *dc390_phase1[]={ - dc390_DataOutPhase, - dc390_DataInPhase, - dc390_CommandPhase, - dc390_StatusPhase, - dc390_Nop_0, - dc390_Nop_0, - dc390_MsgOutPhase, - dc390_MsgInPhase, - dc390_Nop_1 - }; - -#ifdef DC390_DEBUG1 -static char* dc390_p0_str[] = { - "dc390_DataOut_0", - "dc390_DataIn_0", - "dc390_Command_0", - "dc390_Status_0", - "dc390_Nop_0", - "dc390_Nop_0", - "dc390_MsgOut_0", - "dc390_MsgIn_0", - "dc390_Nop_1" - }; - -static char* dc390_p1_str[] = { - "dc390_DataOutPhase", - "dc390_DataInPhase", - "dc390_CommandPhase", - "dc390_StatusPhase", - "dc390_Nop_0", - "dc390_Nop_0", - "dc390_MsgOutPhase", - "dc390_MsgInPhase", - "dc390_Nop_1" - }; -#endif - -static u8 dc390_eepromBuf[MAX_ADAPTER_NUM][EE_LEN]; -static u8 dc390_clock_period1[] = {4, 5, 6, 7, 8, 10, 13, 20}; -static u8 dc390_clock_speed[] = {100,80,67,57,50, 40, 31, 20}; - -/*********************************************************************** - * Functions for the management of the internal structures - * (DCBs, SRBs, Queueing) - * - **********************************************************************/ -static void inline dc390_start_segment(struct dc390_srb* pSRB) -{ - struct scatterlist *psgl = pSRB->pSegmentList; - - /* start new sg segment */ - pSRB->SGBusAddr = sg_dma_address(psgl); - pSRB->SGToBeXferLen = sg_dma_len(psgl); -} - -static unsigned long inline dc390_advance_segment(struct dc390_srb* pSRB, u32 residue) -{ - unsigned long xfer = pSRB->SGToBeXferLen - residue; - - /* xfer more bytes transferred */ - pSRB->SGBusAddr += xfer; - 
pSRB->TotalXferredLen += xfer; - pSRB->SGToBeXferLen = residue; - - return xfer; -} - -static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun) -{ - struct dc390_dcb* pDCB = pACB->pLinkDCB; if (!pDCB) return NULL; - while (pDCB->TargetID != id || pDCB->TargetLUN != lun) - { - pDCB = pDCB->pNextDCB; - if (pDCB == pACB->pLinkDCB) - return NULL; - } - DCBDEBUG1( printk (KERN_DEBUG "DCB %p (%02x,%02x) found.\n", \ - pDCB, pDCB->TargetID, pDCB->TargetLUN)); - return pDCB; -} - -/* Insert SRB oin top of free list */ -static __inline__ void dc390_Free_insert (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - DEBUG0(printk ("DC390: Free SRB %p\n", pSRB)); - pSRB->pNextSRB = pACB->pFreeSRB; - pACB->pFreeSRB = pSRB; -} - -static __inline__ void dc390_Going_append (struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - pDCB->GoingSRBCnt++; - DEBUG0(printk("DC390: Append SRB %p to Going\n", pSRB)); - /* Append to the list of Going commands */ - if( pDCB->pGoingSRB ) - pDCB->pGoingLast->pNextSRB = pSRB; - else - pDCB->pGoingSRB = pSRB; - - pDCB->pGoingLast = pSRB; - /* No next one in sent list */ - pSRB->pNextSRB = NULL; -} - -static __inline__ void dc390_Going_remove (struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - DEBUG0(printk("DC390: Remove SRB %p from Going\n", pSRB)); - if (pSRB == pDCB->pGoingSRB) - pDCB->pGoingSRB = pSRB->pNextSRB; - else - { - struct dc390_srb* psrb = pDCB->pGoingSRB; - while (psrb && psrb->pNextSRB != pSRB) - psrb = psrb->pNextSRB; - if (!psrb) - { printk (KERN_ERR "DC390: Remove non-ex. SRB %p from Going!\n", pSRB); return; } - psrb->pNextSRB = pSRB->pNextSRB; - if (pSRB == pDCB->pGoingLast) - pDCB->pGoingLast = psrb; - } - pDCB->GoingSRBCnt--; -} - -static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length) -{ - sg_init_one(sg, addr, length); - return sg; -} - -/* Create pci mapping */ -static int dc390_pci_map (struct dc390_srb* pSRB) -{ - int error = 0; - struct scsi_cmnd *pcmd = pSRB->pcmd; - struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev; - dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp)); - - /* Map sense buffer */ - if (pSRB->SRBFlag & AUTO_REQSENSE) { - pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); - pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1, - DMA_FROM_DEVICE); - cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList); - - /* TODO: error handling */ - if (pSRB->SGcount != 1) - error = 1; - DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle)); - /* Map SG list */ - } else if (scsi_sg_count(pcmd)) { - int nseg; - - nseg = scsi_dma_map(pcmd); - - pSRB->pSegmentList = scsi_sglist(pcmd); - pSRB->SGcount = nseg; - - /* TODO: error handling */ - if (nseg < 0) - error = 1; - DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\ - __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd))); - /* Map single segment */ - } else - pSRB->SGcount = 0; - - return error; -} - -/* Remove pci mapping */ -static void dc390_pci_unmap (struct dc390_srb* pSRB) -{ - struct scsi_cmnd *pcmd = pSRB->pcmd; - struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev; - DEBUG1(dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp))); - - if (pSRB->SRBFlag) { - pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE); - DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle)); - } else { - scsi_dma_unmap(pcmd); - DEBUG1(printk("%s(): 
Unmapped SG at %p with %d elements\n", - __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd))); - } -} - -static void __inline__ -dc390_freetag (struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - if (pSRB->TagNumber != SCSI_NO_TAG) { - pDCB->TagMask &= ~(1 << pSRB->TagNumber); /* free tag mask */ - pSRB->TagNumber = SCSI_NO_TAG; - } -} - - -static int -dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB ) -{ - struct scsi_cmnd *scmd = pSRB->pcmd; - struct scsi_device *sdev = scmd->device; - u8 cmd, disc_allowed, try_sync_nego; - - pSRB->ScsiPhase = SCSI_NOP0; - - if (pACB->Connected) - { - // Should not happen normally - printk (KERN_WARNING "DC390: Can't select when connected! (%08x,%02x)\n", - pSRB->SRBState, pSRB->SRBFlag); - pSRB->SRBState = SRB_READY; - pACB->SelConn++; - return 1; - } - if (time_before (jiffies, pACB->last_reset)) - { - DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n")); - return 1; - } - /* KG: Moved pci mapping here */ - dc390_pci_map(pSRB); - /* TODO: error handling */ - DC390_write8 (Scsi_Dest_ID, pDCB->TargetID); - DC390_write8 (Sync_Period, pDCB->SyncPeriod); - DC390_write8 (Sync_Offset, pDCB->SyncOffset); - DC390_write8 (CtrlReg1, pDCB->CtrlR1); - DC390_write8 (CtrlReg3, pDCB->CtrlR3); - DC390_write8 (CtrlReg4, pDCB->CtrlR4); - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); /* Flush FIFO */ - DEBUG1(printk (KERN_INFO "DC390: Start SCSI command: %02x (Sync:%02x)\n",\ - scmd->cmnd[0], pDCB->SyncMode)); - - /* Don't disconnect on AUTO_REQSENSE, cause it might be an - * Contingent Allegiance Condition (6.6), where no tags should be used. - * All other have to be allowed to disconnect to prevent Incorrect - * Initiator Connection (6.8.2/6.5.2) */ - /* Changed KG, 99/06/06 */ - if (! (pSRB->SRBFlag & AUTO_REQSENSE)) - disc_allowed = pDCB->DevMode & EN_DISCONNECT_; - else - disc_allowed = 0; - - if ((pDCB->SyncMode & SYNC_ENABLE) && pDCB->TargetLUN == 0 && sdev->sdtr && - (((scmd->cmnd[0] == REQUEST_SENSE || (pSRB->SRBFlag & AUTO_REQSENSE)) && - !(pDCB->SyncMode & SYNC_NEGO_DONE)) || scmd->cmnd[0] == INQUIRY)) - try_sync_nego = 1; - else - try_sync_nego = 0; - - pSRB->MsgCnt = 0; - cmd = SEL_W_ATN; - DC390_write8 (ScsiFifo, IDENTIFY(disc_allowed, pDCB->TargetLUN)); - /* Change 99/05/31: Don't use tags when not disconnecting (BUSY) */ - if ((pDCB->SyncMode & EN_TAG_QUEUEING) && disc_allowed && (scmd->flags & SCMD_TAGGED)) { - DC390_write8(ScsiFifo, MSG_SIMPLE_TAG); - pDCB->TagMask |= 1 << scmd->request->tag; - pSRB->TagNumber = scmd->request->tag; - DC390_write8(ScsiFifo, scmd->request->tag); - DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for SRB %p, block tag %02x\n", pSRB, tag[1])); - cmd = SEL_W_ATN3; - } else { - /* No TagQ */ -//no_tag: - DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for SRB %p, No TagQ\n", disc_allowed ? "" : "o", pSRB)); - } - - pSRB->SRBState = SRB_START_; - - if (try_sync_nego) - { - u8 Sync_Off = pDCB->SyncOffset; - DEBUG0(printk (KERN_INFO "DC390: NEW Sync Nego code triggered (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN)); - pSRB->MsgOutBuf[0] = EXTENDED_MESSAGE; - pSRB->MsgOutBuf[1] = 3; - pSRB->MsgOutBuf[2] = EXTENDED_SDTR; - pSRB->MsgOutBuf[3] = pDCB->NegoPeriod; - if (!(Sync_Off & 0x0f)) Sync_Off = SYNC_NEGO_OFFSET; - pSRB->MsgOutBuf[4] = Sync_Off; - pSRB->MsgCnt = 5; - //pSRB->SRBState = SRB_MSGOUT_; - pSRB->SRBState |= DO_SYNC_NEGO; - cmd = SEL_W_ATN_STOP; - } - - /* Command is written in CommandPhase, if SEL_W_ATN_STOP ... 
*/ - if (cmd != SEL_W_ATN_STOP) - { - if( pSRB->SRBFlag & AUTO_REQSENSE ) - { - DC390_write8 (ScsiFifo, REQUEST_SENSE); - DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5); - DC390_write8 (ScsiFifo, 0); - DC390_write8 (ScsiFifo, 0); - DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE); - DC390_write8 (ScsiFifo, 0); - DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n")); - } - else /* write cmnd to bus */ - { - u8 *ptr; u8 i; - ptr = (u8 *)scmd->cmnd; - for (i = 0; i < scmd->cmd_len; i++) - DC390_write8 (ScsiFifo, *(ptr++)); - } - } - DEBUG0(if (pACB->pActiveDCB) \ - printk (KERN_WARNING "DC390: ActiveDCB != 0\n")); - DEBUG0(if (pDCB->pActiveSRB) \ - printk (KERN_WARNING "DC390: ActiveSRB != 0\n")); - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); - if (DC390_read8 (Scsi_Status) & INTERRUPT) - { - dc390_freetag (pDCB, pSRB); - DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n", - scmd->device->id, (u8)scmd->device->lun)); - pSRB->SRBState = SRB_READY; - //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pACB->SelLost++; - return 1; - } - DC390_write8 (ScsiCmd, cmd); - pACB->pActiveDCB = pDCB; - pDCB->pActiveSRB = pSRB; - pACB->Connected = 1; - pSRB->ScsiPhase = SCSI_NOP1; - return 0; -} - - -static void __inline__ -dc390_InvalidCmd(struct dc390_acb* pACB) -{ - if (pACB->pActiveDCB->pActiveSRB->SRBState & (SRB_START_ | SRB_MSGOUT)) - DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); -} - - -static irqreturn_t __inline__ -DC390_Interrupt(void *dev_id) -{ - struct dc390_acb *pACB = dev_id; - struct dc390_dcb *pDCB; - struct dc390_srb *pSRB; - u8 sstatus=0; - u8 phase; - void (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *); - u8 istate, istatus; - - sstatus = DC390_read8 (Scsi_Status); - if( !(sstatus & INTERRUPT) ) - return IRQ_NONE; - - DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus)); - - //DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT); - //dstatus = DC390_read8 (DMA_Status); - //DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); - - spin_lock_irq(pACB->pScsiHost->host_lock); - - istate = DC390_read8 (Intern_State); - istatus = DC390_read8 (INT_Status); /* This clears Scsi_Status, Intern_State and INT_Status ! */ - - DEBUG1(printk (KERN_INFO "Istatus(Res,Inv,Dis,Serv,Succ,ReS,SelA,Sel)=%02x,",istatus)); - dc390_laststatus &= ~0x00ffffff; - dc390_laststatus |= /* dstatus<<24 | */ sstatus<<16 | istate<<8 | istatus; - - if (sstatus & ILLEGAL_OP_ERR) - { - printk ("DC390: Illegal Operation detected (%08x)!\n", dc390_laststatus); - dc390_dumpinfo (pACB, pACB->pActiveDCB, pACB->pActiveDCB->pActiveSRB); - } - - else if (istatus & INVALID_CMD) - { - printk ("DC390: Invalid Command detected (%08x)!\n", dc390_laststatus); - dc390_InvalidCmd( pACB ); - goto unlock; - } - - if (istatus & SCSI_RESET) - { - dc390_ScsiRstDetect( pACB ); - goto unlock; - } - - if (istatus & DISCONNECTED) - { - dc390_Disconnect( pACB ); - goto unlock; - } - - if (istatus & RESELECTED) - { - dc390_Reselect( pACB ); - goto unlock; - } - - else if (istatus & (SELECTED | SEL_ATTENTION)) - { - printk (KERN_ERR "DC390: Target mode not supported!\n"); - goto unlock; - } - - if (istatus & (SUCCESSFUL_OP|SERVICE_REQUEST) ) - { - pDCB = pACB->pActiveDCB; - if (!pDCB) - { - printk (KERN_ERR "DC390: Suc. op/ Serv. 
req: pActiveDCB = 0!\n"); - goto unlock; - } - pSRB = pDCB->pActiveSRB; - if( pDCB->DCBFlag & ABORT_DEV_ ) - dc390_EnableMsgOut_Abort (pACB, pSRB); - - phase = pSRB->ScsiPhase; - DEBUG1(printk (KERN_INFO "DC390: [%i]%s(0) (%02x)\n", phase, dc390_p0_str[phase], sstatus)); - stateV = (void *) dc390_phase0[phase]; - ( *stateV )( pACB, pSRB, &sstatus ); - - pSRB->ScsiPhase = sstatus & 7; - phase = (u8) sstatus & 7; - DEBUG1(printk (KERN_INFO "DC390: [%i]%s(1) (%02x)\n", phase, dc390_p1_str[phase], sstatus)); - stateV = (void *) dc390_phase1[phase]; - ( *stateV )( pACB, pSRB, &sstatus ); - } - - unlock: - spin_unlock_irq(pACB->pScsiHost->host_lock); - return IRQ_HANDLED; -} - -static irqreturn_t do_DC390_Interrupt(int irq, void *dev_id) -{ - irqreturn_t ret; - DEBUG1(printk (KERN_INFO "DC390: Irq (%i) caught: ", irq)); - /* Locking is done in DC390_Interrupt */ - ret = DC390_Interrupt(dev_id); - DEBUG1(printk (".. IRQ returned\n")); - return ret; -} - -static void -dc390_DataOut_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - u8 sstatus; - u32 ResidCnt; - u8 dstate = 0; - - sstatus = *psstatus; - - if( !(pSRB->SRBState & SRB_XFERPAD) ) - { - if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR) ) - pSRB->SRBStatus |= PARITY_ERROR; - - if( sstatus & COUNT_2_ZERO ) - { - unsigned long timeout = jiffies + HZ; - - /* Function called from the ISR with the host_lock held and interrupts disabled */ - if (pSRB->SGToBeXferLen) - while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) { - spin_unlock_irq(pACB->pScsiHost->host_lock); - udelay(50); - spin_lock_irq(pACB->pScsiHost->host_lock); - } - if (!time_before(jiffies, timeout)) - printk (KERN_CRIT "DC390: Deadlock in DataOut_0: DMA aborted unfinished: %06x bytes remain!!\n", - DC390_read32 (DMA_Wk_ByteCntr)); - dc390_laststatus &= ~0xff000000; - dc390_laststatus |= dstate << 24; - pSRB->TotalXferredLen += pSRB->SGToBeXferLen; - pSRB->SGIndex++; - if( pSRB->SGIndex < pSRB->SGcount ) - { - pSRB->pSegmentList++; - - dc390_start_segment(pSRB); - } - else - pSRB->SGToBeXferLen = 0; - } - else - { - ResidCnt = ((u32) DC390_read8 (Current_Fifo) & 0x1f) + - (((u32) DC390_read8 (CtcReg_High) << 16) | - ((u32) DC390_read8 (CtcReg_Mid) << 8) | - (u32) DC390_read8 (CtcReg_Low)); - - dc390_advance_segment(pSRB, ResidCnt); - } - } - if ((*psstatus & 7) != SCSI_DATA_OUT) - { - DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD); - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - } -} - -static void -dc390_DataIn_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - u8 sstatus, residual, bval; - u32 ResidCnt, i; - unsigned long xferCnt; - - sstatus = *psstatus; - - if( !(pSRB->SRBState & SRB_XFERPAD) ) - { - if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR)) - pSRB->SRBStatus |= PARITY_ERROR; - - if( sstatus & COUNT_2_ZERO ) - { - int dstate = 0; - unsigned long timeout = jiffies + HZ; - - /* Function called from the ISR with the host_lock held and interrupts disabled */ - if (pSRB->SGToBeXferLen) - while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) { - spin_unlock_irq(pACB->pScsiHost->host_lock); - udelay(50); - spin_lock_irq(pACB->pScsiHost->host_lock); - } - if (!time_before(jiffies, timeout)) { - printk (KERN_CRIT "DC390: Deadlock in DataIn_0: DMA aborted unfinished: %06x bytes remain!!\n", - DC390_read32 (DMA_Wk_ByteCntr)); - printk (KERN_CRIT "DC390: DataIn_0: DMA State: %i\n", dstate); - } - dc390_laststatus &= ~0xff000000; - dc390_laststatus |= dstate << 24; - 
DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16) \ - + ((unsigned long) DC390_read8 (CtcReg_Mid) << 8) \ - + ((unsigned long) DC390_read8 (CtcReg_Low))); - DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%u,ToBeXfer=%lu),", ResidCnt, pSRB->SGToBeXferLen)); - - DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); - - pSRB->TotalXferredLen += pSRB->SGToBeXferLen; - pSRB->SGIndex++; - if( pSRB->SGIndex < pSRB->SGcount ) - { - pSRB->pSegmentList++; - - dc390_start_segment(pSRB); - } - else - pSRB->SGToBeXferLen = 0; - } - else /* phase changed */ - { - residual = 0; - bval = DC390_read8 (Current_Fifo); - while( bval & 0x1f ) - { - DEBUG1(printk (KERN_DEBUG "Check for residuals,")); - if( (bval & 0x1f) == 1 ) - { - for(i=0; i < 0x100; i++) - { - bval = DC390_read8 (Current_Fifo); - if( !(bval & 0x1f) ) - goto din_1; - else if( i == 0x0ff ) - { - residual = 1; /* ;1 residual byte */ - goto din_1; - } - } - } - else - bval = DC390_read8 (Current_Fifo); - } -din_1: - DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_BLAST_CMD); - for (i = 0xa000; i; i--) - { - bval = DC390_read8 (DMA_Status); - if (bval & BLAST_COMPLETE) - break; - } - /* It seems a DMA Blast abort isn't that bad ... */ - if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n"); - //DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); - dc390_laststatus &= ~0xff000000; - dc390_laststatus |= bval << 24; - - DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval)); - ResidCnt = (((u32) DC390_read8 (CtcReg_High) << 16) | - ((u32) DC390_read8 (CtcReg_Mid) << 8)) | - (u32) DC390_read8 (CtcReg_Low); - - xferCnt = dc390_advance_segment(pSRB, ResidCnt); - - if (residual) { - size_t count = 1; - size_t offset = pSRB->SGBusAddr - sg_dma_address(pSRB->pSegmentList); - unsigned long flags; - u8 *ptr; - - bval = DC390_read8 (ScsiFifo); /* get one residual byte */ - - local_irq_save(flags); - ptr = scsi_kmap_atomic_sg(pSRB->pSegmentList, pSRB->SGcount, &offset, &count); - if (likely(ptr)) { - *(ptr + offset) = bval; - scsi_kunmap_atomic_sg(ptr); - } - local_irq_restore(flags); - WARN_ON(!ptr); - - /* 1 more byte read */ - xferCnt += dc390_advance_segment(pSRB, pSRB->SGToBeXferLen - 1); - } - DEBUG1(printk (KERN_DEBUG "Xfered: %lu, Total: %lu, Remaining: %lu\n", xferCnt,\ - pSRB->TotalXferredLen, pSRB->SGToBeXferLen)); - } - } - if ((*psstatus & 7) != SCSI_DATA_IN) - { - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); - } -} - -static void -dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ -} - -static void -dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - - pSRB->TargetStatus = DC390_read8 (ScsiFifo); - //udelay (1); - pSRB->EndMessage = DC390_read8 (ScsiFifo); /* get message */ - - *psstatus = SCSI_NOP0; - pSRB->SRBState = SRB_COMPLETED; - DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); -} - -static void -dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - if( pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT) ) - *psstatus = SCSI_NOP0; - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); -} - - -static void __inline__ -dc390_reprog (struct dc390_acb* pACB, struct dc390_dcb* pDCB) -{ - DC390_write8 (Sync_Period, pDCB->SyncPeriod); - DC390_write8 (Sync_Offset, pDCB->SyncOffset); - DC390_write8 (CtrlReg3, pDCB->CtrlR3); - DC390_write8 (CtrlReg4, pDCB->CtrlR4); - dc390_SetXferRate (pACB, pDCB); -} - - -#ifdef DC390_DEBUG0 -static void -dc390_printMsg (u8 *MsgBuf, u8 len) 
-{ - int i; - printk (" %02x", MsgBuf[0]); - for (i = 1; i < len; i++) - printk (" %02x", MsgBuf[i]); - printk ("\n"); -} -#endif - -#define DC390_ENABLE_MSGOUT DC390_write8 (ScsiCmd, SET_ATN_CMD) - -/* reject_msg */ -static void __inline__ -dc390_MsgIn_reject (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - pSRB->MsgOutBuf[0] = MESSAGE_REJECT; - pSRB->MsgCnt = 1; - DC390_ENABLE_MSGOUT; - DEBUG0 (printk (KERN_INFO "DC390: Reject message\n")); -} - -/* abort command */ -static void -dc390_EnableMsgOut_Abort ( struct dc390_acb* pACB, struct dc390_srb* pSRB ) -{ - pSRB->MsgOutBuf[0] = ABORT; - pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT; - pSRB->pSRBDCB->DCBFlag &= ~ABORT_DEV_; -} - -static struct dc390_srb* -dc390_MsgIn_QTag (struct dc390_acb* pACB, struct dc390_dcb* pDCB, s8 tag) -{ - struct dc390_srb* pSRB = pDCB->pGoingSRB; - - if (pSRB) - { - struct scsi_cmnd *scmd = scsi_find_tag(pSRB->pcmd->device, tag); - pSRB = (struct dc390_srb *)scmd->host_scribble; - - if (pDCB->DCBFlag & ABORT_DEV_) - { - pSRB->SRBState = SRB_ABORT_SENT; - dc390_EnableMsgOut_Abort( pACB, pSRB ); - } - - if (!(pSRB->SRBState & SRB_DISCONNECT)) - goto mingx0; - - pDCB->pActiveSRB = pSRB; - pSRB->SRBState = SRB_DATA_XFER; - } - else - { - mingx0: - pSRB = pACB->pTmpSRB; - pSRB->SRBState = SRB_UNEXPECT_RESEL; - pDCB->pActiveSRB = pSRB; - pSRB->MsgOutBuf[0] = ABORT_TAG; - pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT; - } - return pSRB; -} - - -/* set async transfer mode */ -static void -dc390_MsgIn_set_async (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - struct dc390_dcb* pDCB = pSRB->pSRBDCB; - if (!(pSRB->SRBState & DO_SYNC_NEGO)) - printk (KERN_INFO "DC390: Target %i initiates Non-Sync?\n", pDCB->TargetID); - pSRB->SRBState &= ~DO_SYNC_NEGO; - pDCB->SyncMode &= ~(SYNC_ENABLE+SYNC_NEGO_DONE); - pDCB->SyncPeriod = 0; - pDCB->SyncOffset = 0; - //pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */ - pDCB->CtrlR3 = FAST_CLK; /* fast clock / normal scsi */ - pDCB->CtrlR4 &= 0x3f; - pDCB->CtrlR4 |= pACB->glitch_cfg; /* glitch eater */ - dc390_reprog (pACB, pDCB); -} - -/* set sync transfer mode */ -static void -dc390_MsgIn_set_sync (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - u8 bval; - u16 wval, wval1; - struct dc390_dcb* pDCB = pSRB->pSRBDCB; - u8 oldsyncperiod = pDCB->SyncPeriod; - u8 oldsyncoffset = pDCB->SyncOffset; - - if (!(pSRB->SRBState & DO_SYNC_NEGO)) - { - printk (KERN_INFO "DC390: Target %i initiates Sync: %ins %i ... 
answer ...\n", - pDCB->TargetID, pSRB->MsgInBuf[3]<<2, pSRB->MsgInBuf[4]); - - /* reject */ - //dc390_MsgIn_reject (pACB, pSRB); - //return dc390_MsgIn_set_async (pACB, pSRB); - - /* Reply with corrected SDTR Message */ - if (pSRB->MsgInBuf[4] > 15) - { - printk (KERN_INFO "DC390: Lower Sync Offset to 15\n"); - pSRB->MsgInBuf[4] = 15; - } - if (pSRB->MsgInBuf[3] < pDCB->NegoPeriod) - { - printk (KERN_INFO "DC390: Set sync nego period to %ins\n", pDCB->NegoPeriod << 2); - pSRB->MsgInBuf[3] = pDCB->NegoPeriod; - } - memcpy (pSRB->MsgOutBuf, pSRB->MsgInBuf, 5); - pSRB->MsgCnt = 5; - DC390_ENABLE_MSGOUT; - } - - pSRB->SRBState &= ~DO_SYNC_NEGO; - pDCB->SyncMode |= SYNC_ENABLE+SYNC_NEGO_DONE; - pDCB->SyncOffset &= 0x0f0; - pDCB->SyncOffset |= pSRB->MsgInBuf[4]; - pDCB->NegoPeriod = pSRB->MsgInBuf[3]; - - wval = (u16) pSRB->MsgInBuf[3]; - wval = wval << 2; wval -= 3; wval1 = wval / 25; /* compute speed */ - if( (wval1 * 25) != wval) wval1++; - bval = FAST_CLK+FAST_SCSI; /* fast clock / fast scsi */ - - pDCB->CtrlR4 &= 0x3f; /* Glitch eater: 12ns less than normal */ - if (pACB->glitch_cfg != NS_TO_GLITCH(0)) - pDCB->CtrlR4 |= NS_TO_GLITCH(((GLITCH_TO_NS(pACB->glitch_cfg)) - 1)); - else - pDCB->CtrlR4 |= NS_TO_GLITCH(0); - if (wval1 < 4) pDCB->CtrlR4 |= NS_TO_GLITCH(0); /* Ultra */ - - if (wval1 >= 8) - { - wval1--; /* Timing computation differs by 1 from FAST_SCSI */ - bval = FAST_CLK; /* fast clock / normal scsi */ - pDCB->CtrlR4 |= pACB->glitch_cfg; /* glitch eater */ - } - - pDCB->CtrlR3 = bval; - pDCB->SyncPeriod = (u8)wval1; - - if ((oldsyncperiod != wval1 || oldsyncoffset != pDCB->SyncOffset) && pDCB->TargetLUN == 0) - { - if (! (bval & FAST_SCSI)) wval1++; - printk (KERN_INFO "DC390: Target %i: Sync transfer %i.%1i MHz, Offset %i\n", pDCB->TargetID, - 40/wval1, ((40%wval1)*10+wval1/2)/wval1, pDCB->SyncOffset & 0x0f); - } - - dc390_reprog (pACB, pDCB); -} - - -/* handle RESTORE_PTR */ -/* This doesn't look very healthy... to-be-fixed */ -static void -dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - struct scsi_cmnd *pcmd = pSRB->pcmd; - struct scatterlist *psgl; - pSRB->TotalXferredLen = 0; - pSRB->SGIndex = 0; - if (scsi_sg_count(pcmd)) { - size_t saved; - pSRB->pSegmentList = scsi_sglist(pcmd); - psgl = pSRB->pSegmentList; - //dc390_pci_sync(pSRB); - - while (pSRB->TotalXferredLen + (unsigned long) sg_dma_len(psgl) < pSRB->Saved_Ptr) - { - pSRB->TotalXferredLen += (unsigned long) sg_dma_len(psgl); - pSRB->SGIndex++; - if( pSRB->SGIndex < pSRB->SGcount ) - { - pSRB->pSegmentList++; - - dc390_start_segment(pSRB); - } - else - pSRB->SGToBeXferLen = 0; - } - - saved = pSRB->Saved_Ptr - pSRB->TotalXferredLen; - pSRB->SGToBeXferLen -= saved; - pSRB->SGBusAddr += saved; - printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n", - pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr); - - } else { - pSRB->SGcount = 0; - printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n"); - } - - pSRB->TotalXferredLen = pSRB->Saved_Ptr; -} - - -/* According to the docs, the AM53C974 reads the message and - * generates a Successful Operation IRQ before asserting ACK for - * the last byte (how does it know whether it's the last ?) */ -/* The old code handled it in another way, indicating, that on - * every message byte an IRQ is generated and every byte has to - * be manually ACKed. Hmmm ? (KG, 98/11/28) */ -/* The old implementation was correct. Sigh! 
*/ - -/* Check if the message is complete */ -static u8 __inline__ -dc390_MsgIn_complete (u8 *msgbuf, u32 len) -{ - if (*msgbuf == EXTENDED_MESSAGE) - { - if (len < 2) return 0; - if (len < msgbuf[1] + 2) return 0; - } - else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) // two byte messages - if (len < 2) return 0; - return 1; -} - - - -/* read and eval received messages */ -static void -dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - struct dc390_dcb* pDCB = pACB->pActiveDCB; - - /* Read the msg */ - - pSRB->MsgInBuf[pACB->MsgLen++] = DC390_read8 (ScsiFifo); - //pSRB->SRBState = 0; - - /* Msg complete ? */ - if (dc390_MsgIn_complete (pSRB->MsgInBuf, pACB->MsgLen)) - { - DEBUG0 (printk (KERN_INFO "DC390: MsgIn:"); dc390_printMsg (pSRB->MsgInBuf, pACB->MsgLen)); - /* Now eval the msg */ - switch (pSRB->MsgInBuf[0]) - { - case DISCONNECT: - pSRB->SRBState = SRB_DISCONNECT; break; - - case SIMPLE_QUEUE_TAG: - case HEAD_OF_QUEUE_TAG: - case ORDERED_QUEUE_TAG: - pSRB = dc390_MsgIn_QTag (pACB, pDCB, pSRB->MsgInBuf[1]); - break; - - case MESSAGE_REJECT: - DC390_write8 (ScsiCmd, RESET_ATN_CMD); - pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */ - if( pSRB->SRBState & DO_SYNC_NEGO) - dc390_MsgIn_set_async (pACB, pSRB); - break; - - case EXTENDED_MESSAGE: - /* reject every extended msg but SDTR */ - if (pSRB->MsgInBuf[1] != 3 || pSRB->MsgInBuf[2] != EXTENDED_SDTR) - dc390_MsgIn_reject (pACB, pSRB); - else - { - if (pSRB->MsgInBuf[3] == 0 || pSRB->MsgInBuf[4] == 0) - dc390_MsgIn_set_async (pACB, pSRB); - else - dc390_MsgIn_set_sync (pACB, pSRB); - } - - // nothing has to be done - case COMMAND_COMPLETE: break; - - // SAVE POINTER may be ignored as we have the struct dc390_srb* associated with the - // scsi command. Thanks, Gerard, for pointing it out. - case SAVE_POINTERS: - pSRB->Saved_Ptr = pSRB->TotalXferredLen; - break; - // The device might want to restart transfer with a RESTORE - case RESTORE_POINTERS: - DEBUG0(printk ("DC390: RESTORE POINTER message received ... try to handle\n")); - dc390_restore_ptr (pACB, pSRB); - break; - - // reject unknown messages - default: dc390_MsgIn_reject (pACB, pSRB); - } - - /* Clear counter and MsgIn state */ - pSRB->SRBState &= ~SRB_MSGIN; - pACB->MsgLen = 0; - } - - *psstatus = SCSI_NOP0; - DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); -} - - -static void -dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir) -{ - unsigned long lval; - struct dc390_dcb* pDCB = pACB->pActiveDCB; - - if (pSRB == pACB->pTmpSRB) - { - if (pDCB) - printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (%02i-%i)\n", pDCB->TargetID, pDCB->TargetLUN); - else - printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) 
(DCB 0!)\n"); - - /* Try to recover - some broken disks react badly to tagged INQUIRY */ - if (pDCB && pACB->scan_devices && pDCB->GoingSRBCnt == 1) { - pSRB = pDCB->pGoingSRB; - pDCB->pActiveSRB = pSRB; - } else { - pSRB->pSRBDCB = pDCB; - dc390_EnableMsgOut_Abort(pACB, pSRB); - if (pDCB) - pDCB->DCBFlag |= ABORT_DEV; - return; - } - } - - if( pSRB->SGIndex < pSRB->SGcount ) - { - DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); - if( !pSRB->SGToBeXferLen ) - { - dc390_start_segment(pSRB); - - DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment.")); - } - lval = pSRB->SGToBeXferLen; - DEBUG1(printk (KERN_DEBUG " DC390: Start transfer: %li bytes (address %08lx)\n", lval, pSRB->SGBusAddr)); - DC390_write8 (CtcReg_Low, (u8) lval); - lval >>= 8; - DC390_write8 (CtcReg_Mid, (u8) lval); - lval >>= 8; - DC390_write8 (CtcReg_High, (u8) lval); - - DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen); - DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr); - - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); - pSRB->SRBState = SRB_DATA_XFER; - - DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD); - - DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir); - //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT)); - //DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status))); - //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT)); - } - else /* xfer pad */ - { - if( pSRB->SGcount ) - { - pSRB->AdaptStatus = H_OVER_UNDER_RUN; - pSRB->SRBStatus |= OVER_RUN; - DEBUG0(printk (KERN_WARNING " DC390: Overrun -")); - } - DEBUG0(printk (KERN_WARNING " Clear transfer pad \n")); - DC390_write8 (CtcReg_Low, 0); - DC390_write8 (CtcReg_Mid, 0); - DC390_write8 (CtcReg_High, 0); - - pSRB->SRBState |= SRB_XFERPAD; - DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE); -/* - DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); - DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir); -*/ - } -} - - -static void -dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - dc390_DataIO_Comm (pACB, pSRB, WRITE_DIRECTION); -} - -static void -dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - dc390_DataIO_Comm (pACB, pSRB, READ_DIRECTION); -} - -static void -dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - struct dc390_dcb* pDCB; - u8 i, cnt; - u8 *ptr; - - DC390_write8 (ScsiCmd, RESET_ATN_CMD); - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - if( !(pSRB->SRBFlag & AUTO_REQSENSE) ) - { - cnt = (u8) pSRB->pcmd->cmd_len; - ptr = (u8 *) pSRB->pcmd->cmnd; - for(i=0; i < cnt; i++) - DC390_write8 (ScsiFifo, *(ptr++)); - } - else - { - DC390_write8 (ScsiFifo, REQUEST_SENSE); - pDCB = pACB->pActiveDCB; - DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5); - DC390_write8 (ScsiFifo, 0); - DC390_write8 (ScsiFifo, 0); - DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE); - DC390_write8 (ScsiFifo, 0); - DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n")); - } - pSRB->SRBState = SRB_COMMAND; - DC390_write8 (ScsiCmd, INFO_XFER_CMD); -} - -static void -dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pSRB->SRBState = SRB_STATUS; - DC390_write8 (ScsiCmd, INITIATOR_CMD_CMPLTE); - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); -} - -static void -dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - u8 bval, i, cnt; - u8 *ptr; - struct dc390_dcb* pDCB; - - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pDCB = pACB->pActiveDCB; - 
if( !(pSRB->SRBState & SRB_MSGOUT) )
- {
- cnt = pSRB->MsgCnt;
- if( cnt )
- {
- ptr = (u8 *) pSRB->MsgOutBuf;
- for(i=0; i < cnt; i++)
- DC390_write8 (ScsiFifo, *(ptr++));
- pSRB->MsgCnt = 0;
- if( (pDCB->DCBFlag & ABORT_DEV_) &&
- (pSRB->MsgOutBuf[0] == ABORT) )
- pSRB->SRBState = SRB_ABORT_SENT;
- }
- else
- {
- bval = ABORT; /* ??? MSG_NOP */
- if( (pSRB->pcmd->cmnd[0] == INQUIRY ) ||
- (pSRB->pcmd->cmnd[0] == REQUEST_SENSE) ||
- (pSRB->SRBFlag & AUTO_REQSENSE) )
- {
- if( pDCB->SyncMode & SYNC_ENABLE )
- goto mop1;
- }
- DC390_write8 (ScsiFifo, bval);
- }
- DC390_write8 (ScsiCmd, INFO_XFER_CMD);
- }
- else
- {
-mop1:
- printk (KERN_ERR "DC390: OLD Sync Nego code triggered! (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN);
- DC390_write8 (ScsiFifo, EXTENDED_MESSAGE);
- DC390_write8 (ScsiFifo, 3); /* ;length of extended msg */
- DC390_write8 (ScsiFifo, EXTENDED_SDTR); /* ; sync nego */
- DC390_write8 (ScsiFifo, pDCB->NegoPeriod);
- if (pDCB->SyncOffset & 0x0f)
- DC390_write8 (ScsiFifo, pDCB->SyncOffset);
- else
- DC390_write8 (ScsiFifo, SYNC_NEGO_OFFSET);
- pSRB->SRBState |= DO_SYNC_NEGO;
- DC390_write8 (ScsiCmd, INFO_XFER_CMD);
- }
-}
-
-static void
-dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
-{
- DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
- if( !(pSRB->SRBState & SRB_MSGIN) )
- {
- pSRB->SRBState &= ~SRB_DISCONNECT;
- pSRB->SRBState |= SRB_MSGIN;
- }
- DC390_write8 (ScsiCmd, INFO_XFER_CMD);
- //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
-}
-
-static void
-dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
-{
-}
-
-static void
-dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
-{
-}
-
-
-static void
-dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB )
-{
- u8 bval, i, cnt;
- struct dc390_dcb* ptr;
-
- if( !(pDCB->TargetLUN) )
- {
- if( !pACB->scan_devices )
- {
- ptr = pACB->pLinkDCB;
- cnt = pACB->DCBCnt;
- bval = pDCB->TargetID;
- for(i=0; i<cnt; i++)
- {
- if( ptr->TargetID == bval )
- {
- ptr->SyncPeriod = pDCB->SyncPeriod;
- ptr->SyncOffset = pDCB->SyncOffset;
- ptr->CtrlR3 = pDCB->CtrlR3;
- ptr->CtrlR4 = pDCB->CtrlR4;
- ptr->SyncMode = pDCB->SyncMode;
- }
- ptr = ptr->pNextDCB;
- }
- }
- }
- return;
-}
-
-
-static void
-dc390_Disconnect( struct dc390_acb* pACB )
-{
- struct dc390_dcb *pDCB;
- struct dc390_srb *pSRB, *psrb;
- u8 i, cnt;
-
- DEBUG0(printk(KERN_INFO "DISC,"));
-
- if (!pACB->Connected) printk(KERN_ERR "DC390: Disconnect not-connected bus?\n");
- pACB->Connected = 0;
- pDCB = pACB->pActiveDCB;
- if (!pDCB)
- {
- DEBUG0(printk(KERN_ERR "ACB:%p->ActiveDCB:%p IOPort:%04x IRQ:%02x !\n",\
- pACB, pDCB, pACB->IOPortBase, pACB->IRQLevel));
- mdelay(400);
- DC390_read8 (INT_Status); /* Reset Pending INT */
- DC390_write8 (ScsiCmd, EN_SEL_RESEL);
- return;
- }
- DC390_write8 (ScsiCmd, EN_SEL_RESEL);
- pSRB = pDCB->pActiveSRB;
- pACB->pActiveDCB = NULL;
- pSRB->ScsiPhase = SCSI_NOP0;
- if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
- pSRB->SRBState = 0;
- else if( pSRB->SRBState & SRB_ABORT_SENT )
- {
- pDCB->TagMask = 0;
- pDCB->DCBFlag = 0;
- cnt = pDCB->GoingSRBCnt;
- pDCB->GoingSRBCnt = 0;
- pSRB = pDCB->pGoingSRB;
- for( i=0; i < cnt; i++)
- {
- psrb = pSRB->pNextSRB;
- dc390_Free_insert (pACB, pSRB);
- pSRB = psrb;
- }
- pDCB->pGoingSRB = NULL;
- }
- else
- {
- if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
- !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
- { /* Selection time out */
- pSRB->AdaptStatus = H_SEL_TIMEOUT;
- pSRB->TargetStatus = 0;
- goto disc1;
- }
- else if (!(pSRB->SRBState & 
SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED)) - { -disc1: - dc390_freetag (pDCB, pSRB); - pDCB->pActiveSRB = NULL; - pSRB->SRBState = SRB_FREE; - dc390_SRBdone( pACB, pDCB, pSRB); - } - } - pACB->MsgLen = 0; -} - - -static void -dc390_Reselect( struct dc390_acb* pACB ) -{ - struct dc390_dcb* pDCB; - struct dc390_srb* pSRB; - u8 id, lun; - - DEBUG0(printk(KERN_INFO "RSEL,")); - pACB->Connected = 1; - pDCB = pACB->pActiveDCB; - if( pDCB ) - { /* Arbitration lost but Reselection won */ - DEBUG0(printk ("DC390: (ActiveDCB != 0: Arb. lost but resel. won)!\n")); - pSRB = pDCB->pActiveSRB; - if( !( pACB->scan_devices ) ) - { - struct scsi_cmnd *pcmd = pSRB->pcmd; - scsi_set_resid(pcmd, scsi_bufflen(pcmd)); - SET_RES_DID(pcmd->result, DID_SOFT_ERROR); - dc390_Going_remove(pDCB, pSRB); - dc390_Free_insert(pACB, pSRB); - pcmd->scsi_done (pcmd); - DEBUG0(printk(KERN_DEBUG"DC390: Return SRB %p to free\n", pSRB)); - } - } - /* Get ID */ - lun = DC390_read8 (ScsiFifo); - DEBUG0(printk ("Dev %02x,", lun)); - if (!(lun & (1 << pACB->pScsiHost->this_id))) - printk (KERN_ERR "DC390: Reselection must select host adapter: %02x!\n", lun); - else - lun ^= 1 << pACB->pScsiHost->this_id; /* Mask AdapterID */ - id = 0; while (lun >>= 1) id++; - /* Get LUN */ - lun = DC390_read8 (ScsiFifo); - if (!(lun & IDENTIFY_BASE)) printk (KERN_ERR "DC390: Resel: Expect identify message!\n"); - lun &= 7; - DEBUG0(printk ("(%02i-%i),", id, lun)); - pDCB = dc390_findDCB (pACB, id, lun); - if (!pDCB) - { - printk (KERN_ERR "DC390: Reselect from non existing device (%02i-%i)\n", - id, lun); - return; - } - pACB->pActiveDCB = pDCB; - /* TagQ: We expect a message soon, so never mind the exact SRB */ - if( pDCB->SyncMode & EN_TAG_QUEUEING ) - { - pSRB = pACB->pTmpSRB; - pDCB->pActiveSRB = pSRB; - } - else - { - pSRB = pDCB->pActiveSRB; - if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) ) - { - pSRB= pACB->pTmpSRB; - pSRB->SRBState = SRB_UNEXPECT_RESEL; - printk (KERN_ERR "DC390: Reselect without outstanding cmnd (%02i-%i)\n", - id, lun); - pDCB->pActiveSRB = pSRB; - dc390_EnableMsgOut_Abort ( pACB, pSRB ); - } - else - { - if( pDCB->DCBFlag & ABORT_DEV_ ) - { - pSRB->SRBState = SRB_ABORT_SENT; - printk (KERN_INFO "DC390: Reselect: Abort (%02i-%i)\n", - id, lun); - dc390_EnableMsgOut_Abort( pACB, pSRB ); - } - else - pSRB->SRBState = SRB_DATA_XFER; - } - } - - DEBUG1(printk (KERN_DEBUG "Resel SRB(%p): TagNum (%02x)\n", pSRB, pSRB->TagNumber)); - pSRB->ScsiPhase = SCSI_NOP0; - DC390_write8 (Scsi_Dest_ID, pDCB->TargetID); - DC390_write8 (Sync_Period, pDCB->SyncPeriod); - DC390_write8 (Sync_Offset, pDCB->SyncOffset); - DC390_write8 (CtrlReg1, pDCB->CtrlR1); - DC390_write8 (CtrlReg3, pDCB->CtrlR3); - DC390_write8 (CtrlReg4, pDCB->CtrlR4); /* ; Glitch eater */ - DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); /* ;to release the /ACK signal */ -} - -static int __inline__ -dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - struct scsi_cmnd *pcmd; - - pcmd = pSRB->pcmd; - - REMOVABLEDEBUG(printk(KERN_INFO "DC390: RequestSense(Cmd %02x, Id %02x, LUN %02x)\n",\ - pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN)); - - pSRB->SRBFlag |= AUTO_REQSENSE; - pSRB->SavedTotXLen = pSRB->TotalXferredLen; - pSRB->AdaptStatus = 0; - pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */ - - /* We are called from SRBdone, original PCI mapping has been removed - * already, new one is set up from StartSCSI */ - pSRB->SGIndex = 0; - - pSRB->TotalXferredLen = 0; - pSRB->SGToBeXferLen = 0; - return dc390_StartSCSI(pACB, 
pDCB, pSRB); -} - - -static void -dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB ) -{ - u8 status; - struct scsi_cmnd *pcmd; - - pcmd = pSRB->pcmd; - /* KG: Moved pci_unmap here */ - dc390_pci_unmap(pSRB); - - status = pSRB->TargetStatus; - - DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p\n", status, pcmd->result, pSRB)); - if(pSRB->SRBFlag & AUTO_REQSENSE) - { /* Last command was a Request Sense */ - pSRB->SRBFlag &= ~AUTO_REQSENSE; - pSRB->AdaptStatus = 0; - pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION; - - //pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status); - if (status == SAM_STAT_CHECK_CONDITION) - pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0); - else /* Retry */ - { - if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */) - { - /* Don't retry on TEST_UNIT_READY */ - pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION); - REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\ - (u32) pcmd->result, (u32) pSRB->TotalXferredLen)); - } else { - SET_RES_DRV(pcmd->result, DRIVER_SENSE); - //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); - DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun)); - pSRB->TotalXferredLen = 0; - SET_RES_DID(pcmd->result, DID_SOFT_ERROR); - } - } - goto cmd_done; - } - if( status ) - { - if (status == SAM_STAT_CHECK_CONDITION) - { - if (dc390_RequestSense(pACB, pDCB, pSRB)) { - SET_RES_DID(pcmd->result, DID_ERROR); - goto cmd_done; - } - return; - } - else if (status == SAM_STAT_TASK_SET_FULL) - { - scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); - DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun)); - pSRB->TotalXferredLen = 0; - SET_RES_DID(pcmd->result, DID_SOFT_ERROR); - } - else if (status == SAM_STAT_BUSY && - (pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) && - pACB->scan_devices) - { - pSRB->AdaptStatus = 0; - pSRB->TargetStatus = status; - pcmd->result = MK_RES(0,0,pSRB->EndMessage,/*status*/0); - } - else - { /* Another error */ - pSRB->TotalXferredLen = 0; - SET_RES_DID(pcmd->result, DID_SOFT_ERROR); - goto cmd_done; - } - } - else - { /* Target status == 0 */ - status = pSRB->AdaptStatus; - if (status == H_OVER_UNDER_RUN) - { - pSRB->TargetStatus = 0; - SET_RES_DID(pcmd->result,DID_OK); - SET_RES_MSG(pcmd->result,pSRB->EndMessage); - } - else if (status == H_SEL_TIMEOUT) - { - pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0); - /* Devices are removed below ... */ - } - else if( pSRB->SRBStatus & PARITY_ERROR) - { - //pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0); - SET_RES_DID(pcmd->result,DID_PARITY); - SET_RES_MSG(pcmd->result,pSRB->EndMessage); - } - else /* No error */ - { - pSRB->AdaptStatus = 0; - pSRB->TargetStatus = 0; - SET_RES_DID(pcmd->result,DID_OK); - } - } - -cmd_done: - scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen); - - dc390_Going_remove (pDCB, pSRB); - /* Add to free list */ - dc390_Free_insert (pACB, pSRB); - - DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done\n")); - pcmd->scsi_done (pcmd); - - return; -} - - -/* Remove all SRBs from Going list and inform midlevel */ -static void -dc390_DoingSRB_Done(struct dc390_acb* pACB, struct scsi_cmnd *cmd) -{ - struct dc390_dcb *pDCB, *pdcb; - struct dc390_srb *psrb, *psrb2; - int i; - struct scsi_cmnd *pcmd; - - pDCB = pACB->pLinkDCB; - pdcb = pDCB; - if (! 
pdcb) return; - do - { - psrb = pdcb->pGoingSRB; - for (i = 0; i < pdcb->GoingSRBCnt; i++) - { - psrb2 = psrb->pNextSRB; - pcmd = psrb->pcmd; - dc390_Free_insert (pACB, psrb); - psrb = psrb2; - } - pdcb->GoingSRBCnt = 0; - pdcb->pGoingSRB = NULL; - pdcb->TagMask = 0; - pdcb = pdcb->pNextDCB; - } while( pdcb != pDCB ); -} - - -static void -dc390_ResetSCSIBus( struct dc390_acb* pACB ) -{ - //DC390_write8 (ScsiCmd, RST_DEVICE_CMD); - //udelay (250); - //DC390_write8 (ScsiCmd, NOP_CMD); - - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); - DC390_write8 (ScsiCmd, RST_SCSI_BUS_CMD); - pACB->Connected = 0; - - return; -} - -static void -dc390_ScsiRstDetect( struct dc390_acb* pACB ) -{ - printk ("DC390: Rst_Detect: laststat = %08x\n", dc390_laststatus); - //DEBUG0(printk(KERN_INFO "RST_DETECT,")); - - DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); - /* Unlock before ? */ - /* delay half a second */ - udelay (1000); - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pACB->last_reset = jiffies + 5*HZ/2 - + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; - pACB->Connected = 0; - - if( pACB->ACBFlag & RESET_DEV ) - pACB->ACBFlag |= RESET_DONE; - else - { /* Reset was issued by sb else */ - pACB->ACBFlag |= RESET_DETECT; - - dc390_ResetDevParam( pACB ); - dc390_DoingSRB_Done( pACB, NULL); - //dc390_RecoverSRB( pACB ); - pACB->pActiveDCB = NULL; - pACB->ACBFlag = 0; - } - return; -} - -static int DC390_queuecommand_lck(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) -{ - struct scsi_device *sdev = cmd->device; - struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata; - struct dc390_dcb *dcb = sdev->hostdata; - struct dc390_srb *srb; - - if (sdev->queue_depth <= dcb->GoingSRBCnt) - goto device_busy; - if (acb->pActiveDCB) - goto host_busy; - if (acb->ACBFlag & (RESET_DETECT|RESET_DONE|RESET_DEV)) - goto host_busy; - - srb = acb->pFreeSRB; - if (unlikely(srb == NULL)) - goto host_busy; - - cmd->scsi_done = done; - cmd->result = 0; - acb->Cmds++; - - acb->pFreeSRB = srb->pNextSRB; - srb->pNextSRB = NULL; - - srb->pSRBDCB = dcb; - srb->pcmd = cmd; - cmd->host_scribble = (char *)srb; - - srb->SGIndex = 0; - srb->AdaptStatus = 0; - srb->TargetStatus = 0; - srb->MsgCnt = 0; - - srb->SRBStatus = 0; - srb->SRBFlag = 0; - srb->SRBState = 0; - srb->TotalXferredLen = 0; - srb->SGBusAddr = 0; - srb->SGToBeXferLen = 0; - srb->ScsiPhase = 0; - srb->EndMessage = 0; - srb->TagNumber = SCSI_NO_TAG; - - if (dc390_StartSCSI(acb, dcb, srb)) { - dc390_Free_insert(acb, srb); - goto host_busy; - } - - dc390_Going_append(dcb, srb); - - return 0; - - host_busy: - return SCSI_MLQUEUE_HOST_BUSY; - - device_busy: - return SCSI_MLQUEUE_DEVICE_BUSY; -} - -static DEF_SCSI_QCMD(DC390_queuecommand) - -static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - struct pci_dev *pdev; - u16 pstat; - - if (!pDCB) pDCB = pACB->pActiveDCB; - if (!pSRB && pDCB) pSRB = pDCB->pActiveSRB; - - if (pSRB) - { - printk ("DC390: SRB: Xferred %08lx, Remain %08lx, State %08x, Phase %02x\n", - pSRB->TotalXferredLen, pSRB->SGToBeXferLen, pSRB->SRBState, - pSRB->ScsiPhase); - printk ("DC390: AdpaterStatus: %02x, SRB Status %02x\n", pSRB->AdaptStatus, pSRB->SRBStatus); - } - printk ("DC390: Status of last IRQ (DMA/SC/Int/IRQ): %08x\n", dc390_laststatus); - printk ("DC390: Register dump: SCSI block:\n"); - printk ("DC390: XferCnt Cmd Stat IntS IRQS FFIS Ctl1 Ctl2 Ctl3 Ctl4\n"); - printk ("DC390: %06x %02x %02x %02x", - DC390_read8(CtcReg_Low) + (DC390_read8(CtcReg_Mid) << 
8) + (DC390_read8(CtcReg_High) << 16), - DC390_read8(ScsiCmd), DC390_read8(Scsi_Status), DC390_read8(Intern_State)); - printk (" %02x %02x %02x %02x %02x %02x\n", - DC390_read8(INT_Status), DC390_read8(Current_Fifo), DC390_read8(CtrlReg1), - DC390_read8(CtrlReg2), DC390_read8(CtrlReg3), DC390_read8(CtrlReg4)); - DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT); - if (DC390_read8(Current_Fifo) & 0x1f) - { - printk ("DC390: FIFO:"); - while (DC390_read8(Current_Fifo) & 0x1f) printk (" %02x", DC390_read8(ScsiFifo)); - printk ("\n"); - } - printk ("DC390: Register dump: DMA engine:\n"); - printk ("DC390: Cmd STrCnt SBusA WrkBC WrkAC Stat SBusCtrl\n"); - printk ("DC390: %02x %08x %08x %08x %08x %02x %08x\n", - DC390_read8(DMA_Cmd), DC390_read32(DMA_XferCnt), DC390_read32(DMA_XferAddr), - DC390_read32(DMA_Wk_ByteCntr), DC390_read32(DMA_Wk_AddrCntr), - DC390_read8(DMA_Status), DC390_read32(DMA_ScsiBusCtrl)); - DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); - - pdev = pACB->pdev; - pci_read_config_word(pdev, PCI_STATUS, &pstat); - printk ("DC390: Register dump: PCI Status: %04x\n", pstat); - printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n"); -} - - -static int DC390_abort(struct scsi_cmnd *cmd) -{ - struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata; - struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata; - - scmd_printk(KERN_WARNING, cmd, "DC390: Abort command\n"); - - /* abort() is too stupid for already sent commands at the moment. - * If it's called we are in trouble anyway, so let's dump some info - * into the syslog at least. (KG, 98/08/20,99/06/20) */ - dc390_dumpinfo(pACB, pDCB, NULL); - - pDCB->DCBFlag |= ABORT_DEV_; - printk(KERN_INFO "DC390: Aborted.\n"); - - return FAILED; -} - - -static void dc390_ResetDevParam( struct dc390_acb* pACB ) -{ - struct dc390_dcb *pDCB, *pdcb; - - pDCB = pACB->pLinkDCB; - if (! pDCB) return; - pdcb = pDCB; - do - { - pDCB->SyncMode &= ~SYNC_NEGO_DONE; - pDCB->SyncPeriod = 0; - pDCB->SyncOffset = 0; - pDCB->TagMask = 0; - pDCB->CtrlR3 = FAST_CLK; - pDCB->CtrlR4 &= NEGATE_REQACKDATA | CTRL4_RESERVED | NEGATE_REQACK; - pDCB->CtrlR4 |= pACB->glitch_cfg; - pDCB = pDCB->pNextDCB; - } - while( pdcb != pDCB ); - pACB->ACBFlag &= ~(RESET_DEV | RESET_DONE | RESET_DETECT); - -} - -static int DC390_bus_reset (struct scsi_cmnd *cmd) -{ - struct dc390_acb* pACB = (struct dc390_acb*) cmd->device->host->hostdata; - u8 bval; - - spin_lock_irq(cmd->device->host->host_lock); - - bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST; - DC390_write8(CtrlReg1, bval); /* disable IRQ on bus reset */ - - pACB->ACBFlag |= RESET_DEV; - dc390_ResetSCSIBus(pACB); - - dc390_ResetDevParam(pACB); - mdelay(1); - pACB->last_reset = jiffies + 3*HZ/2 - + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; - - DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); - DC390_read8(INT_Status); /* Reset Pending INT */ - - dc390_DoingSRB_Done(pACB, cmd); - - pACB->pActiveDCB = NULL; - pACB->ACBFlag = 0; - - bval = DC390_read8(CtrlReg1) & ~DIS_INT_ON_SCSI_RST; - DC390_write8(CtrlReg1, bval); /* re-enable interrupt */ - - spin_unlock_irq(cmd->device->host->host_lock); - - return SUCCESS; -} - -/** - * dc390_slave_alloc - Called by the scsi mid layer to tell us about a new - * scsi device that we need to deal with. - * - * @scsi_device: The new scsi device that we need to handle. 
- */ -static int dc390_slave_alloc(struct scsi_device *scsi_device) -{ - struct dc390_acb *pACB = (struct dc390_acb*) scsi_device->host->hostdata; - struct dc390_dcb *pDCB, *pDCB2 = NULL; - uint id = scsi_device->id; - uint lun = scsi_device->lun; - - pDCB = kzalloc(sizeof(struct dc390_dcb), GFP_KERNEL); - if (!pDCB) - return -ENOMEM; - - if (!pACB->DCBCnt++) { - pACB->pLinkDCB = pDCB; - pACB->pDCBRunRobin = pDCB; - } else { - pACB->pLastDCB->pNextDCB = pDCB; - } - - pDCB->pNextDCB = pACB->pLinkDCB; - pACB->pLastDCB = pDCB; - - pDCB->pDCBACB = pACB; - pDCB->TargetID = id; - pDCB->TargetLUN = lun; - - /* - * Some values are for all LUNs: Copy them - * In a clean way: We would have an own structure for a SCSI-ID - */ - if (lun && (pDCB2 = dc390_findDCB(pACB, id, 0))) { - pDCB->DevMode = pDCB2->DevMode; - pDCB->SyncMode = pDCB2->SyncMode & SYNC_NEGO_DONE; - pDCB->SyncPeriod = pDCB2->SyncPeriod; - pDCB->SyncOffset = pDCB2->SyncOffset; - pDCB->NegoPeriod = pDCB2->NegoPeriod; - - pDCB->CtrlR3 = pDCB2->CtrlR3; - pDCB->CtrlR4 = pDCB2->CtrlR4; - } else { - u8 index = pACB->AdapterIndex; - PEEprom prom = (PEEprom) &dc390_eepromBuf[index][id << 2]; - - pDCB->DevMode = prom->EE_MODE1; - pDCB->NegoPeriod = - (dc390_clock_period1[prom->EE_SPEED] * 25) >> 2; - pDCB->CtrlR3 = FAST_CLK; - pDCB->CtrlR4 = pACB->glitch_cfg | CTRL4_RESERVED; - if (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) - pDCB->CtrlR4 |= NEGATE_REQACKDATA | NEGATE_REQACK; - } - - if (pDCB->DevMode & SYNC_NEGO_) - pDCB->SyncMode |= SYNC_ENABLE; - else { - pDCB->SyncMode = 0; - pDCB->SyncOffset &= ~0x0f; - } - - pDCB->CtrlR1 = pACB->pScsiHost->this_id; - if (pDCB->DevMode & PARITY_CHK_) - pDCB->CtrlR1 |= PARITY_ERR_REPO; - - pACB->scan_devices = 1; - scsi_device->hostdata = pDCB; - return 0; -} - -/** - * dc390_slave_destroy - Called by the scsi mid layer to tell us about a - * device that is going away. - * - * @scsi_device: The scsi device that we need to remove. - */ -static void dc390_slave_destroy(struct scsi_device *scsi_device) -{ - struct dc390_acb* pACB = (struct dc390_acb*) scsi_device->host->hostdata; - struct dc390_dcb* pDCB = (struct dc390_dcb*) scsi_device->hostdata; - struct dc390_dcb* pPrevDCB = pACB->pLinkDCB; - - pACB->scan_devices = 0; - - BUG_ON(pDCB->GoingSRBCnt > 1); - - if (pDCB == pACB->pLinkDCB) { - if (pACB->pLastDCB == pDCB) { - pDCB->pNextDCB = NULL; - pACB->pLastDCB = NULL; - } - pACB->pLinkDCB = pDCB->pNextDCB; - } else { - while (pPrevDCB->pNextDCB != pDCB) - pPrevDCB = pPrevDCB->pNextDCB; - pPrevDCB->pNextDCB = pDCB->pNextDCB; - if (pDCB == pACB->pLastDCB) - pACB->pLastDCB = pPrevDCB; - } - - if (pDCB == pACB->pActiveDCB) - pACB->pActiveDCB = NULL; - if (pDCB == pACB->pLinkDCB) - pACB->pLinkDCB = pDCB->pNextDCB; - if (pDCB == pACB->pDCBRunRobin) - pACB->pDCBRunRobin = pDCB->pNextDCB; - kfree(pDCB); - - pACB->DCBCnt--; -} - -static int dc390_slave_configure(struct scsi_device *sdev) -{ - struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata; - struct dc390_dcb *dcb = (struct dc390_dcb *)sdev->hostdata; - - acb->scan_devices = 0; - - /* - * XXX: Note that while this driver used to called scsi_activate_tcq, - * it never actually set a tag type, so emulate the old behavior. 
- */ - scsi_set_tag_type(sdev, 0); - - if (sdev->tagged_supported && (dcb->DevMode & TAG_QUEUEING_)) { - dcb->SyncMode |= EN_TAG_QUEUEING; - scsi_change_queue_depth(sdev, acb->TagMaxNum); - } - - return 0; -} - -static struct scsi_host_template driver_template = { - .module = THIS_MODULE, - .proc_name = "tmscsim", - .name = DC390_BANNER " V" DC390_VERSION, - .slave_alloc = dc390_slave_alloc, - .slave_configure = dc390_slave_configure, - .slave_destroy = dc390_slave_destroy, - .queuecommand = DC390_queuecommand, - .eh_abort_handler = DC390_abort, - .eh_bus_reset_handler = DC390_bus_reset, - .can_queue = 1, - .this_id = 7, - .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, - .use_clustering = ENABLE_CLUSTERING, - .max_sectors = 0x4000, /* 8MiB = 16 * 1024 * 512 */ - .use_blk_tags = 1, -}; - -/*********************************************************************** - * Functions for access to DC390 EEPROM - * and some to emulate it - * - **********************************************************************/ - -static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd) -{ - u8 carryFlag = 1, j = 0x80, bval; - int i; - - for (i = 0; i < 9; i++) { - if (carryFlag) { - pci_write_config_byte(pdev, 0x80, 0x40); - bval = 0xc0; - } else - bval = 0x80; - - udelay(160); - pci_write_config_byte(pdev, 0x80, bval); - udelay(160); - pci_write_config_byte(pdev, 0x80, 0); - udelay(160); - - carryFlag = (cmd & j) ? 1 : 0; - j >>= 1; - } -} - -static u16 dc390_eeprom_get_data(struct pci_dev *pdev) -{ - int i; - u16 wval = 0; - u8 bval; - - for (i = 0; i < 16; i++) { - wval <<= 1; - - pci_write_config_byte(pdev, 0x80, 0x80); - udelay(160); - pci_write_config_byte(pdev, 0x80, 0x40); - udelay(160); - pci_read_config_byte(pdev, 0x00, &bval); - - if (bval == 0x22) - wval |= 1; - } - - return wval; -} - -static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr) -{ - u8 cmd = EEPROM_READ, i; - - for (i = 0; i < 0x40; i++) { - pci_write_config_byte(pdev, 0xc0, 0); - udelay(160); - - dc390_eeprom_prepare_read(pdev, cmd++); - *ptr++ = dc390_eeprom_get_data(pdev); - - pci_write_config_byte(pdev, 0x80, 0); - pci_write_config_byte(pdev, 0x80, 0); - udelay(160); - } -} - -/* Override EEprom values with explicitly set values */ -static void dc390_eeprom_override(u8 index) -{ - u8 *ptr = (u8 *) dc390_eepromBuf[index], id; - - /* Adapter Settings */ - if (tmscsim[0] != -2) - ptr[EE_ADAPT_SCSI_ID] = (u8)tmscsim[0]; /* Adapter ID */ - if (tmscsim[3] != -2) - ptr[EE_MODE2] = (u8)tmscsim[3]; - if (tmscsim[5] != -2) - ptr[EE_DELAY] = tmscsim[5]; /* Reset delay */ - if (tmscsim[4] != -2) - ptr[EE_TAG_CMD_NUM] = (u8)tmscsim[4]; /* Tagged Cmds */ - - /* Device Settings */ - for (id = 0; id < MAX_SCSI_ID; id++) { - if (tmscsim[2] != -2) - ptr[id << 2] = (u8)tmscsim[2]; /* EE_MODE1 */ - if (tmscsim[1] != -2) - ptr[(id << 2) + 1] = (u8)tmscsim[1]; /* EE_Speed */ - } -} - -static int tmscsim_def[] = { - 7, - 0 /* 10MHz */, - PARITY_CHK_ | SEND_START_ | EN_DISCONNECT_ | SYNC_NEGO_ | TAG_QUEUEING_, - MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION | LUN_CHECK, - 3 /* 16 Tags per LUN */, - 1 /* s delay after Reset */, -}; - -/* Copy defaults over set values where missing */ -static void dc390_fill_with_defaults (void) -{ - int i; - - for (i = 0; i < 6; i++) { - if (tmscsim[i] < 0 || tmscsim[i] > 255) - tmscsim[i] = tmscsim_def[i]; - } - - /* Sanity checks */ - if (tmscsim[0] > 7) - tmscsim[0] = 7; - if (tmscsim[1] > 7) - tmscsim[1] = 4; - if (tmscsim[4] > 5) - tmscsim[4] = 4; - if (tmscsim[5] > 180) - tmscsim[5] = 180; -} - 
-static void dc390_check_eeprom(struct pci_dev *pdev, u8 index) -{ - u8 interpd[] = {1, 3, 5, 10, 16, 30, 60, 120}; - u8 EEbuf[128]; - u16 *ptr = (u16 *)EEbuf, wval = 0; - int i; - - dc390_read_eeprom(pdev, ptr); - memcpy(dc390_eepromBuf[index], EEbuf, EE_ADAPT_SCSI_ID); - memcpy(&dc390_eepromBuf[index][EE_ADAPT_SCSI_ID], - &EEbuf[REAL_EE_ADAPT_SCSI_ID], EE_LEN - EE_ADAPT_SCSI_ID); - - dc390_eepromBuf[index][EE_DELAY] = interpd[dc390_eepromBuf[index][EE_DELAY]]; - - for (i = 0; i < 0x40; i++, ptr++) - wval += *ptr; - - /* no Tekram EEprom found */ - if (wval != 0x1234) { - int speed; - - printk(KERN_INFO "DC390_init: No EEPROM found! Trying default settings ...\n"); - - /* - * XXX(hch): bogus, because we might have tekram and - * non-tekram hbas in a single machine. - */ - dc390_fill_with_defaults(); - - speed = dc390_clock_speed[tmscsim[1]]; - printk(KERN_INFO "DC390: Used defaults: AdaptID=%i, SpeedIdx=%i (%i.%i MHz), " - "DevMode=0x%02x, AdaptMode=0x%02x, TaggedCmnds=%i (%i), DelayReset=%is\n", - tmscsim[0], tmscsim[1], speed / 10, speed % 10, - (u8)tmscsim[2], (u8)tmscsim[3], tmscsim[4], 2 << (tmscsim[4]), tmscsim[5]); - } -} - -static void dc390_init_hw(struct dc390_acb *pACB, u8 index) -{ - struct Scsi_Host *shost = pACB->pScsiHost; - u8 dstate; - - /* Disable SCSI bus reset interrupt */ - DC390_write8(CtrlReg1, DIS_INT_ON_SCSI_RST | shost->this_id); - - if (pACB->Gmode2 & RST_SCSI_BUS) { - dc390_ResetSCSIBus(pACB); - udelay(1000); - pACB->last_reset = jiffies + HZ/2 + - HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; - } - - pACB->ACBFlag = 0; - - /* Reset Pending INT */ - DC390_read8(INT_Status); - - /* 250ms selection timeout */ - DC390_write8(Scsi_TimeOut, SEL_TIMEOUT); - - /* Conversion factor = 0 , 40MHz clock */ - DC390_write8(Clk_Factor, CLK_FREQ_40MHZ); - - /* NOP cmd - clear command register */ - DC390_write8(ScsiCmd, NOP_CMD); - - /* Enable Feature and SCSI-2 */ - DC390_write8(CtrlReg2, EN_FEATURE+EN_SCSI2_CMD); - - /* Fast clock */ - DC390_write8(CtrlReg3, FAST_CLK); - - /* Negation */ - DC390_write8(CtrlReg4, pACB->glitch_cfg | /* glitch eater */ - (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) ? 
- NEGATE_REQACKDATA : 0); - - /* Clear Transfer Count High: ID */ - DC390_write8(CtcReg_High, 0); - DC390_write8(DMA_Cmd, DMA_IDLE_CMD); - DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); - DC390_write32(DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); - - dstate = DC390_read8(DMA_Status); - DC390_write8(DMA_Status, dstate); -} - -static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct dc390_acb *pACB; - struct Scsi_Host *shost; - unsigned long io_port; - int error = -ENODEV, i; - - if (pci_enable_device(pdev)) - goto out; - - pci_set_master(pdev); - - error = -ENOMEM; - if (disable_clustering) - driver_template.use_clustering = DISABLE_CLUSTERING; - shost = scsi_host_alloc(&driver_template, sizeof(struct dc390_acb)); - if (!shost) - goto out_disable_device; - - pACB = (struct dc390_acb *)shost->hostdata; - memset(pACB, 0, sizeof(struct dc390_acb)); - - dc390_check_eeprom(pdev, dc390_adapterCnt); - dc390_eeprom_override(dc390_adapterCnt); - - io_port = pci_resource_start(pdev, 0); - - shost->this_id = dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID]; - shost->io_port = io_port; - shost->n_io_port = 0x80; - shost->irq = pdev->irq; - shost->base = io_port; - shost->unique_id = io_port; - - pACB->last_reset = jiffies; - pACB->pScsiHost = shost; - pACB->IOPortBase = (u16) io_port; - pACB->IRQLevel = pdev->irq; - - shost->max_id = 8; - - if (shost->max_id - 1 == - dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID]) - shost->max_id--; - - if (dc390_eepromBuf[dc390_adapterCnt][EE_MODE2] & LUN_CHECK) - shost->max_lun = 8; - else - shost->max_lun = 1; - - pACB->pFreeSRB = pACB->SRB_array; - pACB->SRBCount = MAX_SRB_CNT; - pACB->AdapterIndex = dc390_adapterCnt; - pACB->TagMaxNum = - 2 << dc390_eepromBuf[dc390_adapterCnt][EE_TAG_CMD_NUM]; - pACB->Gmode2 = dc390_eepromBuf[dc390_adapterCnt][EE_MODE2]; - - for (i = 0; i < pACB->SRBCount-1; i++) - pACB->SRB_array[i].pNextSRB = &pACB->SRB_array[i+1]; - pACB->SRB_array[pACB->SRBCount-1].pNextSRB = NULL; - pACB->pTmpSRB = &pACB->TmpSRB; - - pACB->sel_timeout = SEL_TIMEOUT; - pACB->glitch_cfg = EATER_25NS; - pACB->pdev = pdev; - - if (!request_region(io_port, shost->n_io_port, "tmscsim")) { - printk(KERN_ERR "DC390: register IO ports error!\n"); - goto out_host_put; - } - - /* Reset Pending INT */ - DC390_read8_(INT_Status, io_port); - - if (request_irq(pdev->irq, do_DC390_Interrupt, IRQF_SHARED, - "tmscsim", pACB)) { - printk(KERN_ERR "DC390: register IRQ error!\n"); - goto out_release_region; - } - - dc390_init_hw(pACB, dc390_adapterCnt); - - dc390_adapterCnt++; - - pci_set_drvdata(pdev, shost); - - error = scsi_add_host(shost, &pdev->dev); - if (error) - goto out_free_irq; - scsi_scan_host(shost); - return 0; - - out_free_irq: - free_irq(pdev->irq, pACB); - out_release_region: - release_region(io_port, shost->n_io_port); - out_host_put: - scsi_host_put(shost); - out_disable_device: - pci_disable_device(pdev); - out: - return error; -} - -/** - * dc390_remove_one - Called to remove a single instance of the adapter. - * - * @dev: The PCI device to remove. 
- */ -static void dc390_remove_one(struct pci_dev *dev) -{ - struct Scsi_Host *scsi_host = pci_get_drvdata(dev); - unsigned long iflags; - struct dc390_acb* pACB = (struct dc390_acb*) scsi_host->hostdata; - u8 bval; - - scsi_remove_host(scsi_host); - - spin_lock_irqsave(scsi_host->host_lock, iflags); - pACB->ACBFlag = RESET_DEV; - bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST; - DC390_write8 (CtrlReg1, bval); /* disable interrupt */ - if (pACB->Gmode2 & RST_SCSI_BUS) - dc390_ResetSCSIBus(pACB); - spin_unlock_irqrestore(scsi_host->host_lock, iflags); - - free_irq(scsi_host->irq, pACB); - release_region(scsi_host->io_port, scsi_host->n_io_port); - - pci_disable_device(dev); - scsi_host_put(scsi_host); -} - -static struct pci_device_id tmscsim_pci_tbl[] = { - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD53C974, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { } -}; -MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl); - -static struct pci_driver dc390_driver = { - .name = "tmscsim", - .id_table = tmscsim_pci_tbl, - .probe = dc390_probe_one, - .remove = dc390_remove_one, -}; - -static int __init dc390_module_init(void) -{ - if (!disable_clustering) { - printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n"); - printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n"); - } - - if (tmscsim[0] == -1 || tmscsim[0] > 15) { - tmscsim[0] = 7; - tmscsim[1] = 4; - tmscsim[2] = PARITY_CHK_ | TAG_QUEUEING_; - tmscsim[3] = MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION; - tmscsim[4] = 2; - tmscsim[5] = 10; - printk (KERN_INFO "DC390: Using safe settings.\n"); - } - - return pci_register_driver(&dc390_driver); -} - -static void __exit dc390_module_exit(void) -{ - pci_unregister_driver(&dc390_driver); -} - -module_init(dc390_module_init); -module_exit(dc390_module_exit); - -#ifndef MODULE -static int __init dc390_setup (char *str) -{ - int ints[8],i, im; - - get_options(str, ARRAY_SIZE(ints), ints); - im = ints[0]; - - if (im > 6) { - printk (KERN_NOTICE "DC390: ignore extra params!\n"); - im = 6; - } - - for (i = 0; i < im; i++) - tmscsim[i] = ints[i+1]; - /* dc390_checkparams (); */ - return 1; -} - -__setup("tmscsim=", dc390_setup); -#endif diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h deleted file mode 100644 index c9ad4bb77098..000000000000 --- a/drivers/scsi/tmscsim.h +++ /dev/null @@ -1,549 +0,0 @@ -/*********************************************************************** -;* File Name : TMSCSIM.H * -;* TEKRAM DC-390(T) PCI SCSI Bus Master Host Adapter * -;* Device Driver * -;***********************************************************************/ -/* $Id: tmscsim.h,v 2.15.2.3 2000/11/17 20:52:27 garloff Exp $ */ - -#ifndef _TMSCSIM_H -#define _TMSCSIM_H - -#include - -#define MAX_ADAPTER_NUM 4 -#define MAX_SG_LIST_BUF 16 /* Not used */ -#define MAX_SCSI_ID 8 -#define MAX_SRB_CNT 50 /* Max number of started commands */ - -#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */ - -/* -;----------------------------------------------------------------------- -; SCSI Request Block -;----------------------------------------------------------------------- -*/ -struct dc390_srb -{ -//u8 CmdBlock[12]; - -struct dc390_srb *pNextSRB; -struct dc390_dcb *pSRBDCB; -struct scsi_cmnd *pcmd; -struct scatterlist *pSegmentList; - -struct scatterlist Segmentx; /* make a one entry of S/G list table */ - -unsigned long SGBusAddr; /*;a segment starting address as seen by AM53C974A - in CPU endianness. 
We're only getting 32-bit bus - addresses by default */ -unsigned long SGToBeXferLen; /*; to be xfer length */ -unsigned long TotalXferredLen; -unsigned long SavedTotXLen; -unsigned long Saved_Ptr; -u32 SRBState; - -u8 SRBStatus; -u8 SRBFlag; /*; b0-AutoReqSense,b6-Read,b7-write */ - /*; b4-settimeout,b5-Residual valid */ -u8 AdaptStatus; -u8 TargetStatus; - -u8 ScsiPhase; -s8 TagNumber; -u8 SGIndex; -u8 SGcount; - -u8 MsgCnt; -u8 EndMessage; - -u8 MsgInBuf[6]; -u8 MsgOutBuf[6]; - -//u8 IORBFlag; /*;81h-Reset, 2-retry */ -}; - - -/* -;----------------------------------------------------------------------- -; Device Control Block -;----------------------------------------------------------------------- -*/ -struct dc390_dcb -{ -struct dc390_dcb *pNextDCB; -struct dc390_acb *pDCBACB; - -/* Queued SRBs */ -struct dc390_srb *pGoingSRB; -struct dc390_srb *pGoingLast; -struct dc390_srb *pActiveSRB; -u8 GoingSRBCnt; - -u32 TagMask; - -u8 TargetID; /*; SCSI Target ID (SCSI Only) */ -u8 TargetLUN; /*; SCSI Log. Unit (SCSI Only) */ -u8 DevMode; -u8 DCBFlag; - -u8 CtrlR1; -u8 CtrlR3; -u8 CtrlR4; - -u8 SyncMode; /*; 0:async mode */ -u8 NegoPeriod; /*;for nego. */ -u8 SyncPeriod; /*;for reg. */ -u8 SyncOffset; /*;for reg. and nego.(low nibble) */ -}; - - -/* -;----------------------------------------------------------------------- -; Adapter Control Block -;----------------------------------------------------------------------- -*/ -struct dc390_acb -{ -struct Scsi_Host *pScsiHost; -u16 IOPortBase; -u8 IRQLevel; -u8 status; - -u8 SRBCount; -u8 AdapterIndex; /*; nth Adapter this driver */ -u8 DCBCnt; - -u8 TagMaxNum; -u8 ACBFlag; -u8 Gmode2; -u8 scan_devices; - -struct dc390_dcb *pLinkDCB; -struct dc390_dcb *pLastDCB; -struct dc390_dcb *pDCBRunRobin; - -struct dc390_dcb *pActiveDCB; -struct dc390_srb *pFreeSRB; -struct dc390_srb *pTmpSRB; - -u8 msgin123[4]; -u8 Connected; -u8 pad; - -#if defined(USE_SPINLOCKS) && USE_SPINLOCKS > 1 && (defined(CONFIG_SMP) || DEBUG_SPINLOCKS > 0) -spinlock_t lock; -#endif -u8 sel_timeout; -u8 glitch_cfg; - -u8 MsgLen; -u8 Ignore_IRQ; /* Not used */ - -struct pci_dev *pdev; - -unsigned long last_reset; -unsigned long Cmds; -u32 SelLost; -u32 SelConn; -u32 CmdInQ; -u32 CmdOutOfSRB; - -struct dc390_srb TmpSRB; -struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */ -}; - - -/*;-----------------------------------------------------------------------*/ - - -#define BIT31 0x80000000 -#define BIT30 0x40000000 -#define BIT29 0x20000000 -#define BIT28 0x10000000 -#define BIT27 0x08000000 -#define BIT26 0x04000000 -#define BIT25 0x02000000 -#define BIT24 0x01000000 -#define BIT23 0x00800000 -#define BIT22 0x00400000 -#define BIT21 0x00200000 -#define BIT20 0x00100000 -#define BIT19 0x00080000 -#define BIT18 0x00040000 -#define BIT17 0x00020000 -#define BIT16 0x00010000 -#define BIT15 0x00008000 -#define BIT14 0x00004000 -#define BIT13 0x00002000 -#define BIT12 0x00001000 -#define BIT11 0x00000800 -#define BIT10 0x00000400 -#define BIT9 0x00000200 -#define BIT8 0x00000100 -#define BIT7 0x00000080 -#define BIT6 0x00000040 -#define BIT5 0x00000020 -#define BIT4 0x00000010 -#define BIT3 0x00000008 -#define BIT2 0x00000004 -#define BIT1 0x00000002 -#define BIT0 0x00000001 - -/*;---UnitCtrlFlag */ -#define UNIT_ALLOCATED BIT0 -#define UNIT_INFO_CHANGED BIT1 -#define FORMATING_MEDIA BIT2 -#define UNIT_RETRY BIT3 - -/*;---UnitFlags */ -#define DASD_SUPPORT BIT0 -#define SCSI_SUPPORT BIT1 -#define ASPI_SUPPORT BIT2 - -/*;----SRBState machine definition */ -#define SRB_FREE 0 -#define 
SRB_WAIT BIT0 -#define SRB_READY BIT1 -#define SRB_MSGOUT BIT2 /*;arbitration+msg_out 1st byte*/ -#define SRB_MSGIN BIT3 -#define SRB_MSGIN_MULTI BIT4 -#define SRB_COMMAND BIT5 -#define SRB_START_ BIT6 /*;arbitration+msg_out+command_out*/ -#define SRB_DISCONNECT BIT7 -#define SRB_DATA_XFER BIT8 -#define SRB_XFERPAD BIT9 -#define SRB_STATUS BIT10 -#define SRB_COMPLETED BIT11 -#define SRB_ABORT_SENT BIT12 -#define DO_SYNC_NEGO BIT13 -#define SRB_UNEXPECT_RESEL BIT14 - -/*;---SRBstatus */ -#define SRB_OK BIT0 -#define ABORTION BIT1 -#define OVER_RUN BIT2 -#define UNDER_RUN BIT3 -#define PARITY_ERROR BIT4 -#define SRB_ERROR BIT5 - -/*;---ACBFlag */ -#define RESET_DEV BIT0 -#define RESET_DETECT BIT1 -#define RESET_DONE BIT2 - -/*;---DCBFlag */ -#define ABORT_DEV_ BIT0 - -/*;---SRBFlag */ -#define DATAOUT BIT7 -#define DATAIN BIT6 -#define RESIDUAL_VALID BIT5 -#define ENABLE_TIMER BIT4 -#define RESET_DEV0 BIT2 -#define ABORT_DEV BIT1 -#define AUTO_REQSENSE BIT0 - -/*;---Adapter status */ -#define H_STATUS_GOOD 0 -#define H_SEL_TIMEOUT 0x11 -#define H_OVER_UNDER_RUN 0x12 -#define H_UNEXP_BUS_FREE 0x13 -#define H_TARGET_PHASE_F 0x14 -#define H_INVALID_CCB_OP 0x16 -#define H_LINK_CCB_BAD 0x17 -#define H_BAD_TARGET_DIR 0x18 -#define H_DUPLICATE_CCB 0x19 -#define H_BAD_CCB_OR_SG 0x1A -#define H_ABORT 0x0FF - -/* cmd->result */ -#define RES_TARGET 0x000000FF /* Target State */ -#define RES_TARGET_LNX STATUS_MASK /* Only official ... */ -#define RES_ENDMSG 0x0000FF00 /* End Message */ -#define RES_DID 0x00FF0000 /* DID_ codes */ -#define RES_DRV 0xFF000000 /* DRIVER_ codes */ - -#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)) -#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)) - -#define SET_RES_TARGET(who, tgt) do { who &= ~RES_TARGET; who |= (int)(tgt); } while (0) -#define SET_RES_TARGET_LNX(who, tgt) do { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; } while (0) -#define SET_RES_MSG(who, msg) do { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; } while (0) -#define SET_RES_DID(who, did) do { who &= ~RES_DID; who |= (int)(did) << 16; } while (0) -#define SET_RES_DRV(who, drv) do { who &= ~RES_DRV; who |= (int)(drv) << 24; } while (0) - -/*;---Sync_Mode */ -#define SYNC_DISABLE 0 -#define SYNC_ENABLE BIT0 -#define SYNC_NEGO_DONE BIT1 -#define WIDE_ENABLE BIT2 /* Not used ;-) */ -#define WIDE_NEGO_DONE BIT3 /* Not used ;-) */ -#define EN_TAG_QUEUEING BIT4 -#define EN_ATN_STOP BIT5 - -#define SYNC_NEGO_OFFSET 15 - -/*;---SCSI bus phase*/ -#define SCSI_DATA_OUT 0 -#define SCSI_DATA_IN 1 -#define SCSI_COMMAND 2 -#define SCSI_STATUS_ 3 -#define SCSI_NOP0 4 -#define SCSI_NOP1 5 -#define SCSI_MSG_OUT 6 -#define SCSI_MSG_IN 7 - -/*;----SCSI MSG BYTE*/ /* see scsi/scsi.h */ /* One is missing ! 
*/ -#define ABORT_TAG 0x0d - -/* - * SISC query queue - */ -typedef struct { - dma_addr_t saved_dma_handle; -} dc390_cmd_scp_t; - -/* -;========================================================== -; EEPROM byte offset -;========================================================== -*/ -typedef struct _EEprom -{ -u8 EE_MODE1; -u8 EE_SPEED; -u8 xx1; -u8 xx2; -} EEprom, *PEEprom; - -#define REAL_EE_ADAPT_SCSI_ID 64 -#define REAL_EE_MODE2 65 -#define REAL_EE_DELAY 66 -#define REAL_EE_TAG_CMD_NUM 67 - -#define EE_ADAPT_SCSI_ID 32 -#define EE_MODE2 33 -#define EE_DELAY 34 -#define EE_TAG_CMD_NUM 35 - -#define EE_LEN 40 - -/*; EE_MODE1 bits definition*/ -#define PARITY_CHK_ BIT0 -#define SYNC_NEGO_ BIT1 -#define EN_DISCONNECT_ BIT2 -#define SEND_START_ BIT3 -#define TAG_QUEUEING_ BIT4 - -/*; EE_MODE2 bits definition*/ -#define MORE2_DRV BIT0 -#define GREATER_1G BIT1 -#define RST_SCSI_BUS BIT2 -#define ACTIVE_NEGATION BIT3 -#define NO_SEEK BIT4 -#define LUN_CHECK BIT5 - -#define ENABLE_CE 1 -#define DISABLE_CE 0 -#define EEPROM_READ 0x80 - -/* -;========================================================== -; AMD 53C974 Registers bit Definition -;========================================================== -*/ -/* -;==================== -; SCSI Register -;==================== -*/ - -/*; Command Reg.(+0CH) (rw) */ -#define DMA_COMMAND BIT7 -#define NOP_CMD 0 -#define CLEAR_FIFO_CMD 1 -#define RST_DEVICE_CMD 2 -#define RST_SCSI_BUS_CMD 3 - -#define INFO_XFER_CMD 0x10 -#define INITIATOR_CMD_CMPLTE 0x11 -#define MSG_ACCEPTED_CMD 0x12 -#define XFER_PAD_BYTE 0x18 -#define SET_ATN_CMD 0x1A -#define RESET_ATN_CMD 0x1B - -#define SEL_WO_ATN 0x41 /* currently not used */ -#define SEL_W_ATN 0x42 -#define SEL_W_ATN_STOP 0x43 -#define SEL_W_ATN3 0x46 -#define EN_SEL_RESEL 0x44 -#define DIS_SEL_RESEL 0x45 /* currently not used */ -#define RESEL 0x40 /* " */ -#define RESEL_ATN3 0x47 /* " */ - -#define DATA_XFER_CMD INFO_XFER_CMD - - -/*; SCSI Status Reg.(+10H) (r) */ -#define INTERRUPT BIT7 -#define ILLEGAL_OP_ERR BIT6 -#define PARITY_ERR BIT5 -#define COUNT_2_ZERO BIT4 -#define GROUP_CODE_VALID BIT3 -#define SCSI_PHASE_MASK (BIT2+BIT1+BIT0) -/* BIT2: MSG phase; BIT1: C/D physe; BIT0: I/O phase */ - -/*; Interrupt Status Reg.(+14H) (r) */ -#define SCSI_RESET BIT7 -#define INVALID_CMD BIT6 -#define DISCONNECTED BIT5 -#define SERVICE_REQUEST BIT4 -#define SUCCESSFUL_OP BIT3 -#define RESELECTED BIT2 -#define SEL_ATTENTION BIT1 -#define SELECTED BIT0 - -/*; Internal State Reg.(+18H) (r) */ -#define SYNC_OFFSET_FLAG BIT3 -#define INTRN_STATE_MASK (BIT2+BIT1+BIT0) -/* 0x04: Sel. successful (w/o stop), 0x01: Sel. successful (w/ stop) */ - -/*; Clock Factor Reg.(+24H) (w) */ -#define CLK_FREQ_40MHZ 0 -#define CLK_FREQ_35MHZ (BIT2+BIT1+BIT0) -#define CLK_FREQ_30MHZ (BIT2+BIT1) -#define CLK_FREQ_25MHZ (BIT2+BIT0) -#define CLK_FREQ_20MHZ BIT2 -#define CLK_FREQ_15MHZ (BIT1+BIT0) -#define CLK_FREQ_10MHZ BIT1 - -/*; Control Reg. 1(+20H) (rw) */ -#define EXTENDED_TIMING BIT7 -#define DIS_INT_ON_SCSI_RST BIT6 -#define PARITY_ERR_REPO BIT4 -#define SCSI_ID_ON_BUS (BIT2+BIT1+BIT0) /* host adapter ID */ - -/*; Control Reg. 2(+2CH) (rw) */ -#define EN_FEATURE BIT6 -#define EN_SCSI2_CMD BIT3 - -/*; Control Reg. 3(+30H) (rw) */ -#define ID_MSG_CHECK BIT7 -#define EN_QTAG_MSG BIT6 -#define EN_GRP2_CMD BIT5 -#define FAST_SCSI BIT4 /* ;10MB/SEC */ -#define FAST_CLK BIT3 /* ;25 - 40 MHZ */ - -/*; Control Reg. 
4(+34H) (rw) */ -#define EATER_12NS 0 -#define EATER_25NS BIT7 -#define EATER_35NS BIT6 -#define EATER_0NS (BIT7+BIT6) -#define REDUCED_POWER BIT5 -#define CTRL4_RESERVED BIT4 /* must be 1 acc. to AM53C974.c */ -#define NEGATE_REQACKDATA BIT2 -#define NEGATE_REQACK BIT3 - -#define GLITCH_TO_NS(x) (((~x>>6 & 2) >> 1) | ((x>>6 & 1) << 1 ^ (x>>6 & 2))) -#define NS_TO_GLITCH(y) (((~y<<7) | ~((y<<6) ^ ((y<<5 & 1<<6) | ~0x40))) & 0xc0) - -/* -;==================== -; DMA Register -;==================== -*/ -/*; DMA Command Reg.(+40H) (rw) */ -#define READ_DIRECTION BIT7 -#define WRITE_DIRECTION 0 -#define EN_DMA_INT BIT6 -#define EN_PAGE_INT BIT5 /* page transfer interrupt enable */ -#define MAP_TO_MDL BIT4 -#define DIAGNOSTIC BIT2 -#define DMA_IDLE_CMD 0 -#define DMA_BLAST_CMD BIT0 -#define DMA_ABORT_CMD BIT1 -#define DMA_START_CMD (BIT1+BIT0) - -/*; DMA Status Reg.(+54H) (r) */ -#define PCI_MS_ABORT BIT6 -#define BLAST_COMPLETE BIT5 -#define SCSI_INTERRUPT BIT4 -#define DMA_XFER_DONE BIT3 -#define DMA_XFER_ABORT BIT2 -#define DMA_XFER_ERROR BIT1 -#define POWER_DOWN BIT0 - -/*; DMA SCSI Bus and Ctrl.(+70H) */ -#define EN_INT_ON_PCI_ABORT BIT25 -#define WRT_ERASE_DMA_STAT BIT24 -#define PW_DOWN_CTRL BIT21 -#define SCSI_BUSY BIT20 -#define SCLK BIT19 -#define SCAM BIT18 -#define SCSI_LINES 0x0003ffff - -/* -;========================================================== -; SCSI Chip register address offset -;========================================================== -;Registers are rw unless declared otherwise -*/ -#define CtcReg_Low 0x00 /* r curr. transfer count */ -#define CtcReg_Mid 0x04 /* r */ -#define CtcReg_High 0x38 /* r */ -#define ScsiFifo 0x08 -#define ScsiCmd 0x0C -#define Scsi_Status 0x10 /* r */ -#define INT_Status 0x14 /* r */ -#define Sync_Period 0x18 /* w */ -#define Sync_Offset 0x1C /* w */ -#define Clk_Factor 0x24 /* w */ -#define CtrlReg1 0x20 -#define CtrlReg2 0x2C -#define CtrlReg3 0x30 -#define CtrlReg4 0x34 -#define DMA_Cmd 0x40 -#define DMA_XferCnt 0x44 /* rw starting transfer count (32 bit) */ -#define DMA_XferAddr 0x48 /* rw starting physical address (32 bit) */ -#define DMA_Wk_ByteCntr 0x4C /* r working byte counter */ -#define DMA_Wk_AddrCntr 0x50 /* r working address counter */ -#define DMA_Status 0x54 /* r */ -#define DMA_MDL_Addr 0x58 /* rw starting MDL address */ -#define DMA_Wk_MDL_Cntr 0x5C /* r working MDL counter */ -#define DMA_ScsiBusCtrl 0x70 /* rw SCSI Bus, PCI/DMA Ctrl */ - -#define StcReg_Low CtcReg_Low /* w start transfer count */ -#define StcReg_Mid CtcReg_Mid /* w */ -#define StcReg_High CtcReg_High /* w */ -#define Scsi_Dest_ID Scsi_Status /* w */ -#define Scsi_TimeOut INT_Status /* w */ -#define Intern_State Sync_Period /* r */ -#define Current_Fifo Sync_Offset /* r Curr. FIFO / int. 
state */ - - -#define DC390_read8(address) \ - (inb (pACB->IOPortBase + (address))) - -#define DC390_read8_(address, base) \ - (inb ((u16)(base) + (address))) - -#define DC390_read16(address) \ - (inw (pACB->IOPortBase + (address))) - -#define DC390_read32(address) \ - (inl (pACB->IOPortBase + (address))) - -#define DC390_write8(address,value) \ - outb ((value), pACB->IOPortBase + (address)) - -#define DC390_write8_(address,value,base) \ - outb ((value), (u16)(base) + (address)) - -#define DC390_write16(address,value) \ - outw ((value), pACB->IOPortBase + (address)) - -#define DC390_write32(address,value) \ - outl ((value), pACB->IOPortBase + (address)) - - -#endif /* _TMSCSIM_H */ -- cgit v1.2.3-59-g8ed1b From 0a3d775fb2c951d1bd3f5f916eee55face0eaada Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Mon, 24 Nov 2014 16:33:35 +0100 Subject: MAINTAINERS: add maintainer for i.MX DRM driver Add myself as the maintainer of the i.MX DRM driver. Signed-off-by: Philipp Zabel Acked-by: Shawn Guo Signed-off-by: Dave Airlie --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3c69a3c73028..b29325d4a072 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3202,6 +3202,13 @@ F: drivers/gpu/drm/exynos/ F: include/drm/exynos* F: include/uapi/drm/exynos* +DRM DRIVERS FOR FREESCALE IMX +M: Philipp Zabel +L: dri-devel@lists.freedesktop.org +S: Maintained +F: drivers/gpu/drm/imx/ +F: Documentation/devicetree/bindings/drm/imx/ + DRM DRIVERS FOR NVIDIA TEGRA M: Thierry Reding M: Terje Bergström -- cgit v1.2.3-59-g8ed1b From 12ddbadf383d551e2681eb361b4f5c400363b5cd Mon Sep 17 00:00:00 2001 From: Beniamino Galvani Date: Tue, 18 Nov 2014 17:22:34 -0300 Subject: [media] media: rc: add driver for Amlogic Meson IR remote receiver Amlogic Meson SoCs include a infrared remote control receiver that can operate in two modes: "NEC" mode in which the hardware decodes frames using the NEC IR protocol, and "general" mode in which the receiver simply reports the duration of pulses and spaces for software decoding. This is a driver for the IR receiver that implements software decoding of received frames. Signed-off-by: Beniamino Galvani Acked-by: Carlo Caione Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 1 + drivers/media/rc/Kconfig | 11 +++ drivers/media/rc/Makefile | 1 + drivers/media/rc/meson-ir.c | 216 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 229 insertions(+) create mode 100644 drivers/media/rc/meson-ir.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a6288cacde10..f6058ceb297d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -850,6 +850,7 @@ ARM/Amlogic MesonX SoC support M: Carlo Caione L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +F: drivers/media/rc/meson-ir.c N: meson[x68] ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 1aea7320f52a..ddfab256b9a5 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -223,6 +223,17 @@ config IR_FINTEK To compile this driver as a module, choose M here: the module will be called fintek-cir. +config IR_MESON + tristate "Amlogic Meson IR remote receiver" + depends on RC_CORE + depends on ARCH_MESON || COMPILE_TEST + ---help--- + Say Y if you want to use the IR remote receiver available + on Amlogic Meson SoCs. + + To compile this driver as a module, choose M here: the + module will be called meson-ir. 
+ config IR_NUVOTON tristate "Nuvoton w836x7hg Consumer Infrared Transceiver" depends on PNP diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 8f509e0d92cf..379a5c0f1379 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_IR_IMON) += imon.o obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o obj-$(CONFIG_IR_MCEUSB) += mceusb.o obj-$(CONFIG_IR_FINTEK) += fintek-cir.o +obj-$(CONFIG_IR_MESON) += meson-ir.o obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o obj-$(CONFIG_IR_ENE) += ene_ir.o obj-$(CONFIG_IR_REDRAT3) += redrat3.o diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c new file mode 100644 index 000000000000..fcc3b82d1454 --- /dev/null +++ b/drivers/media/rc/meson-ir.c @@ -0,0 +1,216 @@ +/* + * Driver for Amlogic Meson IR remote receiver + * + * Copyright (C) 2014 Beniamino Galvani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRIVER_NAME "meson-ir" + +#define IR_DEC_LDR_ACTIVE 0x00 +#define IR_DEC_LDR_IDLE 0x04 +#define IR_DEC_LDR_REPEAT 0x08 +#define IR_DEC_BIT_0 0x0c +#define IR_DEC_REG0 0x10 +#define IR_DEC_FRAME 0x14 +#define IR_DEC_STATUS 0x18 +#define IR_DEC_REG1 0x1c + +#define REG0_RATE_MASK (BIT(11) - 1) + +#define REG1_MODE_MASK (BIT(7) | BIT(8)) +#define REG1_MODE_NEC (0 << 7) +#define REG1_MODE_GENERAL (2 << 7) + +#define REG1_TIME_IV_SHIFT 16 +#define REG1_TIME_IV_MASK ((BIT(13) - 1) << REG1_TIME_IV_SHIFT) + +#define REG1_IRQSEL_MASK (BIT(2) | BIT(3)) +#define REG1_IRQSEL_NEC_MODE (0 << 2) +#define REG1_IRQSEL_RISE_FALL (1 << 2) +#define REG1_IRQSEL_FALL (2 << 2) +#define REG1_IRQSEL_RISE (3 << 2) + +#define REG1_RESET BIT(0) +#define REG1_ENABLE BIT(15) + +#define STATUS_IR_DEC_IN BIT(8) + +#define MESON_TRATE 10 /* us */ + +struct meson_ir { + void __iomem *reg; + struct rc_dev *rc; + int irq; + spinlock_t lock; +}; + +static void meson_ir_set_mask(struct meson_ir *ir, unsigned int reg, + u32 mask, u32 value) +{ + u32 data; + + data = readl(ir->reg + reg); + data &= ~mask; + data |= (value & mask); + writel(data, ir->reg + reg); +} + +static irqreturn_t meson_ir_irq(int irqno, void *dev_id) +{ + struct meson_ir *ir = dev_id; + u32 duration; + DEFINE_IR_RAW_EVENT(rawir); + + spin_lock(&ir->lock); + + duration = readl(ir->reg + IR_DEC_REG1); + duration = (duration & REG1_TIME_IV_MASK) >> REG1_TIME_IV_SHIFT; + rawir.duration = US_TO_NS(duration * MESON_TRATE); + + rawir.pulse = !!(readl(ir->reg + IR_DEC_STATUS) & STATUS_IR_DEC_IN); + + ir_raw_event_store_with_filter(ir->rc, &rawir); + ir_raw_event_handle(ir->rc); + + spin_unlock(&ir->lock); + + return IRQ_HANDLED; +} + +static int meson_ir_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + const char *map_name; + struct meson_ir *ir; + int ret; + + ir = devm_kzalloc(dev, sizeof(struct meson_ir), GFP_KERNEL); + if (!ir) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ir->reg = devm_ioremap_resource(dev, res); + if (IS_ERR(ir->reg)) { + dev_err(dev, "failed to map registers\n"); + return PTR_ERR(ir->reg); + } + + ir->irq = platform_get_irq(pdev, 0); + if (ir->irq < 0) 
{ + dev_err(dev, "no irq resource\n"); + return ir->irq; + } + + ir->rc = rc_allocate_device(); + if (!ir->rc) { + dev_err(dev, "failed to allocate rc device\n"); + return -ENOMEM; + } + + ir->rc->priv = ir; + ir->rc->input_name = DRIVER_NAME; + ir->rc->input_phys = DRIVER_NAME "/input0"; + ir->rc->input_id.bustype = BUS_HOST; + map_name = of_get_property(node, "linux,rc-map-name", NULL); + ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY; + ir->rc->dev.parent = dev; + ir->rc->driver_type = RC_DRIVER_IR_RAW; + ir->rc->allowed_protocols = RC_BIT_ALL; + ir->rc->rx_resolution = US_TO_NS(MESON_TRATE); + ir->rc->timeout = MS_TO_NS(200); + ir->rc->driver_name = DRIVER_NAME; + + spin_lock_init(&ir->lock); + platform_set_drvdata(pdev, ir); + + ret = rc_register_device(ir->rc); + if (ret) { + dev_err(dev, "failed to register rc device\n"); + goto out_free; + } + + ret = devm_request_irq(dev, ir->irq, meson_ir_irq, 0, "ir-meson", ir); + if (ret) { + dev_err(dev, "failed to request irq\n"); + goto out_unreg; + } + + /* Reset the decoder */ + meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, REG1_RESET); + meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, 0); + /* Set general operation mode */ + meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK, REG1_MODE_GENERAL); + /* Set rate */ + meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1); + /* IRQ on rising and falling edges */ + meson_ir_set_mask(ir, IR_DEC_REG1, REG1_IRQSEL_MASK, + REG1_IRQSEL_RISE_FALL); + /* Enable the decoder */ + meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, REG1_ENABLE); + + dev_info(dev, "receiver initialized\n"); + + return 0; +out_unreg: + rc_unregister_device(ir->rc); + ir->rc = NULL; +out_free: + rc_free_device(ir->rc); + + return ret; +} + +static int meson_ir_remove(struct platform_device *pdev) +{ + struct meson_ir *ir = platform_get_drvdata(pdev); + unsigned long flags; + + /* Disable the decoder */ + spin_lock_irqsave(&ir->lock, flags); + meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, 0); + spin_unlock_irqrestore(&ir->lock, flags); + + rc_unregister_device(ir->rc); + + return 0; +} + +static const struct of_device_id meson_ir_match[] = { + { .compatible = "amlogic,meson6-ir" }, + { }, +}; + +static struct platform_driver meson_ir_driver = { + .probe = meson_ir_probe, + .remove = meson_ir_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = meson_ir_match, + }, +}; + +module_platform_driver(meson_ir_driver); + +MODULE_DESCRIPTION("Amlogic Meson IR remote receiver driver"); +MODULE_AUTHOR("Beniamino Galvani "); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-59-g8ed1b From 7a25ec8e481e9c14de13dcacca0d8ee33bfe5f3c Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Mon, 10 Nov 2014 14:06:42 -0700 Subject: coresight: Adding ABI documentation Signed-off-by: Mathieu Poirier Signed-off-by: Greg Kroah-Hartman --- .../ABI/testing/sysfs-bus-coresight-devices-etb10 | 24 ++ .../ABI/testing/sysfs-bus-coresight-devices-etm3x | 253 +++++++++++++++++++++ .../ABI/testing/sysfs-bus-coresight-devices-funnel | 12 + .../ABI/testing/sysfs-bus-coresight-devices-tmc | 8 + MAINTAINERS | 1 + 5 files changed, 298 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10 create mode 100644 Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x create mode 100644 Documentation/ABI/testing/sysfs-bus-coresight-devices-funnel create mode 100644 Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc (limited to 'MAINTAINERS') diff --git 
a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10 b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10 new file mode 100644 index 000000000000..4b8d6ec92e2b --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10 @@ -0,0 +1,24 @@ +What: /sys/bus/coresight/devices/.etb/enable_sink +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Add/remove a sink from a trace path. There can be multiple + source for a single sink. + ex: echo 1 > /sys/bus/coresight/devices/20010000.etb/enable_sink + +What: /sys/bus/coresight/devices/.etb/status +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (R) List various control and status registers. The specific + layout and content is driver specific. + +What: /sys/bus/coresight/devices/.etb/trigger_cntr +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Disables write access to the Trace RAM by stopping the + formatter after a defined number of words have been stored + following the trigger event. The number of 32-bit words written + into the Trace RAM following the trigger event is equal to the + value stored in this register+1 (from ARM ETB-TRM). diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x new file mode 100644 index 000000000000..b4d0b99afffb --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x @@ -0,0 +1,253 @@ +What: /sys/bus/coresight/devices/.[etm|ptm]/enable_source +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Enable/disable tracing on this specific trace entiry. + Enabling a source implies the source has been configured + properly and a sink has been identidifed for it. The path + of coresight components linking the source to the sink is + configured and managed automatically by the coresight framework. + +What: /sys/bus/coresight/devices/.[etm|ptm]/status +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (R) List various control and status registers. The specific + layout and content is driver specific. + +What: /sys/bus/coresight/devices/.[etm|ptm]/addr_idx +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: Select which address comparator or pair (of comparators) to + work with. + +What: /sys/bus/coresight/devices/.[etm|ptm]/addr_acctype +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with @addr_idx. Specifies + characteristics about the address comparator being configure, + for example the access type, the kind of instruction to trace, + processor contect ID to trigger on, etc. Individual fields in + the access type register may vary on the version of the trace + entity. + +What: /sys/bus/coresight/devices/.[etm|ptm]/addr_range +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with @addr_idx. Specifies the range of + addresses to trigger on. Inclusion or exclusion is specificed + in the corresponding access type register. + +What: /sys/bus/coresight/devices/.[etm|ptm]/addr_single +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with @addr_idx. Specifies the single + address to trigger on, highly influenced by the configuration + options of the corresponding access type register. 
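The address comparator attributes above are meant to be used together: addr_idx selects which comparator (or pair) later writes apply to, addr_acctype describes its characteristics, and addr_range/addr_single program the actual addresses. A minimal sketch of that select-then-program flow, following the "ex:" style of the etb10 entries; the 20010000.etm device name and all index/address values are hypothetical, and the exact input format each file accepts is driver specific:

	ex:	# hypothetical device and values, for illustration only
		cd /sys/bus/coresight/devices/20010000.etm
		echo 0 > addr_idx		# work on address comparator 0
		echo 0x40008000 > addr_single	# single address to trigger on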
+ +What: /sys/bus/coresight/devices/.[etm|ptm]/addr_start +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with @addr_idx. Specifies the single + address to start tracing on, highly influenced by the + configuration options of the corresponding access type register. + +What: /sys/bus/coresight/devices/.[etm|ptm]/addr_stop +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with @addr_idx. Specifies the single + address to stop tracing on, highly influenced by the + configuration options of the corresponding access type register. + +What: /sys/bus/coresight/devices/.[etm|ptm]/cntr_idx +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Specifies the counter to work on. + +What: /sys/bus/coresight/devices/.[etm|ptm]/cntr_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with cntr_idx, give access to the + counter event register. + +What: /sys/bus/coresight/devices/.[etm|ptm]/cntr_val +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with cntr_idx, give access to the + counter value register. + +What: /sys/bus/coresight/devices/.[etm|ptm]/cntr_rld_val +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with cntr_idx, give access to the + counter reload value register. + +What: /sys/bus/coresight/devices/.[etm|ptm]/cntr_rld_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used in conjunction with cntr_idx, give access to the + counter reload event register. + +What: /sys/bus/coresight/devices/.[etm|ptm]/ctxid_idx +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Specifies the index of the context ID register to be + selected. + +What: /sys/bus/coresight/devices/.[etm|ptm]/ctxid_mask +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Mask to apply to all the context ID comparator. + +What: /sys/bus/coresight/devices/.[etm|ptm]/ctxid_val +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Used with the ctxid_idx, specify with context ID to trigger + on. + +What: /sys/bus/coresight/devices/.[etm|ptm]/enable_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines which event triggers a trace. + +What: /sys/bus/coresight/devices/.[etm|ptm]/etmsr +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Gives access to the ETM status register, which holds + programming information and status on certains events. + +What: /sys/bus/coresight/devices/.[etm|ptm]/fifofull_level +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Number of byte left in the fifo before considering it full. + Depending on the tracer's version, can also hold threshold for + data suppression. + +What: /sys/bus/coresight/devices/.[etm|ptm]/mode +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Interface with the driver's 'mode' field, controlling + various aspect of the trace entity such as time stamping, + context ID size and cycle accurate tracing. Driver specific + and bound to change depending on the driver. 
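The counter attributes follow the same indexed pattern: cntr_idx selects a counter, and the other cntr_* files then give access to that counter's registers. A sketch under the same assumptions (hypothetical 20010000.etm device, illustrative values only):

	ex:	# hypothetical device and values, for illustration only
		cd /sys/bus/coresight/devices/20010000.etm
		echo 1 > cntr_idx		# work on counter 1
		echo 0x1000 > cntr_val		# program the counter value register
		cat cntr_val			# read it back (attribute is RW)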
+ +What: /sys/bus/coresight/devices/.[etm|ptm]/nr_addr_cmp +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (R) Provides the number of address comparators pairs accessible + on a trace unit, as specified by bit 3:0 of register ETMCCR. + +What: /sys/bus/coresight/devices/.[etm|ptm]/nr_cntr +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (R) Provides the number of counters accessible on a trace unit, + as specified by bit 15:13 of register ETMCCR. + +What: /sys/bus/coresight/devices/.[etm|ptm]/nr_ctxid_cmp +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (R) Provides the number of context ID comparator available on a + trace unit, as specified by bit 25:24 of register ETMCCR. + +What: /sys/bus/coresight/devices/.[etm|ptm]/reset +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (W) Cancels all configuration on a trace unit and set it back + to its boot configuration. + +What: /sys/bus/coresight/devices/.[etm|ptm]/seq_12_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines the event that causes the sequencer to transition + from state 1 to state 2. + +What: /sys/bus/coresight/devices/.[etm|ptm]/seq_13_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines the event that causes the sequencer to transition + from state 1 to state 3. + +What: /sys/bus/coresight/devices/.[etm|ptm]/seq_21_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines the event that causes the sequencer to transition + from state 2 to state 1. + +What: /sys/bus/coresight/devices/.[etm|ptm]/seq_23_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines the event that causes the sequencer to transition + from state 2 to state 3. + +What: /sys/bus/coresight/devices/.[etm|ptm]/seq_31_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines the event that causes the sequencer to transition + from state 3 to state 1. + +What: /sys/bus/coresight/devices/.[etm|ptm]/seq_32_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines the event that causes the sequencer to transition + from state 3 to state 2. + +What: /sys/bus/coresight/devices/.[etm|ptm]/curr_seq_state +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (R) Holds the current state of the sequencer. + +What: /sys/bus/coresight/devices/.[etm|ptm]/sync_freq +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Holds the trace synchronization frequency value - must be + programmed with the various implementation behavior in mind. + +What: /sys/bus/coresight/devices/.[etm|ptm]/timestamp_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines an event that requests the insertion of a timestamp + into the trace stream. + +What: /sys/bus/coresight/devices/.[etm|ptm]/traceid +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Holds the trace ID that will appear in the trace stream + coming from this trace entity. + +What: /sys/bus/coresight/devices/.[etm|ptm]/trigger_event +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Define the event that controls the trigger. 
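Taken together with the ETB attributes earlier in this patch, a typical session comes down to enabling a sink and then a source; per the enable_source description, the path of coresight components linking the two is built and managed by the framework itself. A sketch using hypothetical device names only:

	ex:	# hypothetical device names, for illustration only
		echo 1 > /sys/bus/coresight/devices/20010000.etb/enable_sink
		echo 1 > /sys/bus/coresight/devices/2201c000.ptm/enable_source
		# ... run the workload to be traced ...
		echo 0 > /sys/bus/coresight/devices/2201c000.ptm/enable_source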
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-funnel b/Documentation/ABI/testing/sysfs-bus-coresight-devices-funnel new file mode 100644 index 000000000000..d75acda5e1b3 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-funnel @@ -0,0 +1,12 @@ +What: /sys/bus/coresight/devices/.funnel/funnel_ctrl +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Enables the slave ports and defines the hold time of the + slave ports. + +What: /sys/bus/coresight/devices/.funnel/priority +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Defines input port priority order. diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc new file mode 100644 index 000000000000..f38cded5fa22 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc @@ -0,0 +1,8 @@ +What: /sys/bus/coresight/devices/.tmc/trigger_cntr +Date: November 2014 +KernelVersion: 3.19 +Contact: Mathieu Poirier +Description: (RW) Disables write access to the Trace RAM by stopping the + formatter after a defined number of words have been stored + following the trigger event. Additional interface for this + driver are expected to be added as it matures. diff --git a/MAINTAINERS b/MAINTAINERS index 39952634be8a..7d180809cae3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -925,6 +925,7 @@ S: Maintained F: drivers/coresight/* F: Documentation/trace/coresight.txt F: Documentation/devicetree/bindings/arm/coresight.txt +F: Documentation/ABI/testing/sysfs-bus-coresight-devices-* ARM/CORGI MACHINE SUPPORT M: Richard Purdie -- cgit v1.2.3-59-g8ed1b From daac6f8642f786a9576b2b6b0224012d54e4506b Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Wed, 19 Nov 2014 08:44:54 +0000 Subject: MAINTAINERS: Add ahci_st.c to ARCH/STI architecture This patch adds the ahci_st.c driver found on STMicroelectronics stih41x consumer electronics SoC's into the STI arch section of the maintainers file. Signed-off-by: Peter Griffin Acked-by: Lee Jones Acked-by: Maxime Coquelin Signed-off-by: Arnd Bergmann --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index d45abb11cfd1..fdbab2b19a1a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1422,6 +1422,7 @@ F: drivers/tty/serial/st-asc.c F: drivers/usb/dwc3/dwc3-st.c F: drivers/usb/host/ehci-st.c F: drivers/usb/host/ohci-st.c +F: drivers/ata/ahci_st.c ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT M: Lennert Buytenhek -- cgit v1.2.3-59-g8ed1b From 740d93b123fcc1fb5308f7c42a2ff4c755d69342 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Wed, 26 Nov 2014 12:13:05 +0000 Subject: MAINTAINERS: ARM Versatile Express platform This patch adds a separate section for the ARM Versatile Express platform maintainers, listing all different bits and bobs used by it. 
Acked-by: Liviu Dudau Acked-by: Sudeep Holla Acked-by: Lorenzo Pieralisi Signed-off-by: Pawel Moll Acked-by: Michael Turquette Signed-off-by: Arnd Bergmann --- MAINTAINERS | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index fdbab2b19a1a..63542b9ec6cc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1496,6 +1496,18 @@ S: Maintained F: drivers/clk/ux500/ F: include/linux/platform_data/clk-ux500.h +ARM/VERSATILE EXPRESS PLATFORM +M: Liviu Dudau +M: Sudeep Holla +M: Lorenzo Pieralisi +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/boot/dts/vexpress* +F: arch/arm/mach-vexpress/ +F: */*/vexpress* +F: drivers/clk/versatile/clk-vexpress-osc.c +F: drivers/clocksource/versatile.c + ARM/VFP SUPPORT M: Russell King L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -- cgit v1.2.3-59-g8ed1b From 186401937927426f85a28bd798e82ca18e4e5549 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 20 Nov 2014 09:13:42 -0800 Subject: memory: gpmc: Move omap gpmc code to live under drivers Just move to drivers as further clean-up can now happen there finally. Let's also add Roger and me to the MAINTAINERS so we get notified for any patches related to GPMC. Cc: Arnd Bergmann Acked-by: Roger Quadros Signed-off-by: Tony Lindgren --- MAINTAINERS | 8 + arch/arm/mach-omap2/Kconfig | 2 + arch/arm/mach-omap2/Makefile | 2 +- arch/arm/mach-omap2/gpmc.c | 2094 ------------------------------------------ drivers/memory/Kconfig | 8 + drivers/memory/Makefile | 1 + drivers/memory/omap-gpmc.c | 2094 ++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 2114 insertions(+), 2095 deletions(-) delete mode 100644 arch/arm/mach-omap2/gpmc.c create mode 100644 drivers/memory/omap-gpmc.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index dab92a78d1d5..78cc0595d5d4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6629,6 +6629,14 @@ L: linux-omap@vger.kernel.org S: Maintained F: sound/soc/omap/ +OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT +M: Roger Quadros +M: Tony Lindgren +L: linux-omap@vger.kernel.org +S: Maintained +F: drivers/memory/omap-gpmc.c +F: arch/arm/mach-omap2/*gpmc* + OMAP FRAMEBUFFER SUPPORT M: Tomi Valkeinen L: linux-fbdev@vger.kernel.org diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index f4d06aea8460..0ea218e3dce5 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -79,7 +79,9 @@ config ARCH_OMAP2PLUS select CLKSRC_MMIO select GENERIC_IRQ_CHIP select MACH_OMAP_GENERIC + select MEMORY select OMAP_DM_TIMER + select OMAP_GPMC select PINCTRL select SOC_BUS select TI_PRIV_EDMA diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 3e824f8fec48..bd85741a9724 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -6,7 +6,7 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ -I$(srctree)/arch/arm/plat-omap/include # Common support -obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o gpmc.o timer.o pm.o \ +obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o timer.o pm.o \ common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \ omap_device.o sram.o drm.o diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c deleted file mode 100644 index abfc0ddd076e..000000000000 --- a/arch/arm/mach-omap2/gpmc.c +++ /dev/null @@ -1,2094 +0,0 @@ -/* - * GPMC support functions - * - * Copyright (C) 2005-2006 Nokia 
Corporation - * - * Author: Juha Yrjola - * - * Copyright (C) 2009 Texas Instruments - * Added OMAP4 support - Santosh Shilimkar - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#define DEVICE_NAME "omap-gpmc" - -/* GPMC register offsets */ -#define GPMC_REVISION 0x00 -#define GPMC_SYSCONFIG 0x10 -#define GPMC_SYSSTATUS 0x14 -#define GPMC_IRQSTATUS 0x18 -#define GPMC_IRQENABLE 0x1c -#define GPMC_TIMEOUT_CONTROL 0x40 -#define GPMC_ERR_ADDRESS 0x44 -#define GPMC_ERR_TYPE 0x48 -#define GPMC_CONFIG 0x50 -#define GPMC_STATUS 0x54 -#define GPMC_PREFETCH_CONFIG1 0x1e0 -#define GPMC_PREFETCH_CONFIG2 0x1e4 -#define GPMC_PREFETCH_CONTROL 0x1ec -#define GPMC_PREFETCH_STATUS 0x1f0 -#define GPMC_ECC_CONFIG 0x1f4 -#define GPMC_ECC_CONTROL 0x1f8 -#define GPMC_ECC_SIZE_CONFIG 0x1fc -#define GPMC_ECC1_RESULT 0x200 -#define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */ -#define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */ -#define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */ -#define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */ -#define GPMC_ECC_BCH_RESULT_4 0x300 /* not available on OMAP2 */ -#define GPMC_ECC_BCH_RESULT_5 0x304 /* not available on OMAP2 */ -#define GPMC_ECC_BCH_RESULT_6 0x308 /* not available on OMAP2 */ - -/* GPMC ECC control settings */ -#define GPMC_ECC_CTRL_ECCCLEAR 0x100 -#define GPMC_ECC_CTRL_ECCDISABLE 0x000 -#define GPMC_ECC_CTRL_ECCREG1 0x001 -#define GPMC_ECC_CTRL_ECCREG2 0x002 -#define GPMC_ECC_CTRL_ECCREG3 0x003 -#define GPMC_ECC_CTRL_ECCREG4 0x004 -#define GPMC_ECC_CTRL_ECCREG5 0x005 -#define GPMC_ECC_CTRL_ECCREG6 0x006 -#define GPMC_ECC_CTRL_ECCREG7 0x007 -#define GPMC_ECC_CTRL_ECCREG8 0x008 -#define GPMC_ECC_CTRL_ECCREG9 0x009 - -#define GPMC_CONFIG_LIMITEDADDRESS BIT(1) - -#define GPMC_CONFIG2_CSEXTRADELAY BIT(7) -#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7) -#define GPMC_CONFIG4_OEEXTRADELAY BIT(7) -#define GPMC_CONFIG4_WEEXTRADELAY BIT(23) -#define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6) -#define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7) - -#define GPMC_CS0_OFFSET 0x60 -#define GPMC_CS_SIZE 0x30 -#define GPMC_BCH_SIZE 0x10 - -#define GPMC_MEM_END 0x3FFFFFFF - -#define GPMC_CHUNK_SHIFT 24 /* 16 MB */ -#define GPMC_SECTION_SHIFT 28 /* 128 MB */ - -#define CS_NUM_SHIFT 24 -#define ENABLE_PREFETCH (0x1 << 7) -#define DMA_MPU_MODE 2 - -#define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf) -#define GPMC_REVISION_MINOR(l) (l & 0xf) - -#define GPMC_HAS_WR_ACCESS 0x1 -#define GPMC_HAS_WR_DATA_MUX_BUS 0x2 -#define GPMC_HAS_MUX_AAD 0x4 - -#define GPMC_NR_WAITPINS 4 - -#define GPMC_CS_CONFIG1 0x00 -#define GPMC_CS_CONFIG2 0x04 -#define GPMC_CS_CONFIG3 0x08 -#define GPMC_CS_CONFIG4 0x0c -#define GPMC_CS_CONFIG5 0x10 -#define GPMC_CS_CONFIG6 0x14 -#define GPMC_CS_CONFIG7 0x18 -#define GPMC_CS_NAND_COMMAND 0x1c -#define GPMC_CS_NAND_ADDRESS 0x20 -#define GPMC_CS_NAND_DATA 0x24 - -/* Control Commands */ -#define GPMC_CONFIG_RDY_BSY 0x00000001 -#define GPMC_CONFIG_DEV_SIZE 0x00000002 -#define GPMC_CONFIG_DEV_TYPE 0x00000003 -#define GPMC_SET_IRQ_STATUS 0x00000004 - -#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31) -#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30) -#define 
GPMC_CONFIG1_READTYPE_ASYNC (0 << 29) -#define GPMC_CONFIG1_READTYPE_SYNC (1 << 29) -#define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28) -#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27) -#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27) -#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25) -#define GPMC_CONFIG1_PAGE_LEN(val) ((val & 3) << 23) -#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22) -#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21) -#define GPMC_CONFIG1_WAIT_MON_IIME(val) ((val & 3) << 18) -#define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16) -#define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12) -#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1) -#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10) -#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0) -#define GPMC_CONFIG1_MUXTYPE(val) ((val & 3) << 8) -#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4) -#define GPMC_CONFIG1_FCLK_DIV(val) (val & 3) -#define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1)) -#define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2)) -#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3)) -#define GPMC_CONFIG7_CSVALID (1 << 6) - -#define GPMC_DEVICETYPE_NOR 0 -#define GPMC_DEVICETYPE_NAND 2 -#define GPMC_CONFIG_WRITEPROTECT 0x00000010 -#define WR_RD_PIN_MONITORING 0x00600000 - -#define GPMC_ENABLE_IRQ 0x0000000d - -/* ECC commands */ -#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */ -#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */ -#define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */ - -/* XXX: Only NAND irq has been considered,currently these are the only ones used - */ -#define GPMC_NR_IRQ 2 - -struct gpmc_cs_data { - const char *name; - -#define GPMC_CS_RESERVED (1 << 0) - u32 flags; - - struct resource mem; -}; - -struct gpmc_client_irq { - unsigned irq; - u32 bitmask; -}; - -/* Structure to save gpmc cs context */ -struct gpmc_cs_config { - u32 config1; - u32 config2; - u32 config3; - u32 config4; - u32 config5; - u32 config6; - u32 config7; - int is_valid; -}; - -/* - * Structure to save/restore gpmc context - * to support core off on OMAP3 - */ -struct omap3_gpmc_regs { - u32 sysconfig; - u32 irqenable; - u32 timeout_ctrl; - u32 config; - u32 prefetch_config1; - u32 prefetch_config2; - u32 prefetch_control; - struct gpmc_cs_config cs_context[GPMC_CS_NUM]; -}; - -static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; -static struct irq_chip gpmc_irq_chip; -static int gpmc_irq_start; - -static struct resource gpmc_mem_root; -static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM]; -static DEFINE_SPINLOCK(gpmc_mem_lock); -/* Define chip-selects as reserved by default until probe completes */ -static unsigned int gpmc_cs_num = GPMC_CS_NUM; -static unsigned int gpmc_nr_waitpins; -static struct device *gpmc_dev; -static int gpmc_irq; -static resource_size_t phys_base, mem_size; -static unsigned gpmc_capability; -static void __iomem *gpmc_base; - -static struct clk *gpmc_l3_clk; - -static irqreturn_t gpmc_handle_irq(int irq, void *dev); - -static void gpmc_write_reg(int idx, u32 val) -{ - writel_relaxed(val, gpmc_base + idx); -} - -static u32 gpmc_read_reg(int idx) -{ - return readl_relaxed(gpmc_base + idx); -} - -void gpmc_cs_write_reg(int cs, int idx, u32 val) -{ - void __iomem *reg_addr; - - reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx; - writel_relaxed(val, reg_addr); -} - -static u32 gpmc_cs_read_reg(int cs, int idx) -{ - void __iomem *reg_addr; - - reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx; - 
return readl_relaxed(reg_addr); -} - -/* TODO: Add support for gpmc_fck to clock framework and use it */ -static unsigned long gpmc_get_fclk_period(void) -{ - unsigned long rate = clk_get_rate(gpmc_l3_clk); - - rate /= 1000; - rate = 1000000000 / rate; /* In picoseconds */ - - return rate; -} - -static unsigned int gpmc_ns_to_ticks(unsigned int time_ns) -{ - unsigned long tick_ps; - - /* Calculate in picosecs to yield more exact results */ - tick_ps = gpmc_get_fclk_period(); - - return (time_ns * 1000 + tick_ps - 1) / tick_ps; -} - -static unsigned int gpmc_ps_to_ticks(unsigned int time_ps) -{ - unsigned long tick_ps; - - /* Calculate in picosecs to yield more exact results */ - tick_ps = gpmc_get_fclk_period(); - - return (time_ps + tick_ps - 1) / tick_ps; -} - -unsigned int gpmc_ticks_to_ns(unsigned int ticks) -{ - return ticks * gpmc_get_fclk_period() / 1000; -} - -static unsigned int gpmc_ticks_to_ps(unsigned int ticks) -{ - return ticks * gpmc_get_fclk_period(); -} - -static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps) -{ - unsigned long ticks = gpmc_ps_to_ticks(time_ps); - - return ticks * gpmc_get_fclk_period(); -} - -static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value) -{ - u32 l; - - l = gpmc_cs_read_reg(cs, reg); - if (value) - l |= mask; - else - l &= ~mask; - gpmc_cs_write_reg(cs, reg, l); -} - -static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p) -{ - gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1, - GPMC_CONFIG1_TIME_PARA_GRAN, - p->time_para_granularity); - gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2, - GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay); - gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3, - GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay); - gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, - GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay); - gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, - GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay); - gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, - GPMC_CONFIG6_CYCLE2CYCLESAMECSEN, - p->cycle2cyclesamecsen); - gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, - GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN, - p->cycle2cyclediffcsen); -} - -#ifdef DEBUG -static int get_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, - bool raw, bool noval, int shift, - const char *name) -{ - u32 l; - int nr_bits, max_value, mask; - - l = gpmc_cs_read_reg(cs, reg); - nr_bits = end_bit - st_bit + 1; - max_value = (1 << nr_bits) - 1; - mask = max_value << st_bit; - l = (l & mask) >> st_bit; - if (shift) - l = (shift << l); - if (noval && (l == 0)) - return 0; - if (!raw) { - unsigned int time_ns_min, time_ns, time_ns_max; - - time_ns_min = gpmc_ticks_to_ns(l ? l - 1 : 0); - time_ns = gpmc_ticks_to_ns(l); - time_ns_max = gpmc_ticks_to_ns(l + 1 > max_value ? 
- max_value : l + 1); - pr_info("gpmc,%s = <%u> (%u - %u ns, %i ticks)\n", - name, time_ns, time_ns_min, time_ns_max, l); - } else { - pr_info("gpmc,%s = <%u>\n", name, l); - } - - return l; -} - -#define GPMC_PRINT_CONFIG(cs, config) \ - pr_info("cs%i %s: 0x%08x\n", cs, #config, \ - gpmc_cs_read_reg(cs, config)) -#define GPMC_GET_RAW(reg, st, end, field) \ - get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 0, 0, field) -#define GPMC_GET_RAW_BOOL(reg, st, end, field) \ - get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, 0, field) -#define GPMC_GET_RAW_SHIFT(reg, st, end, shift, field) \ - get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, (shift), field) -#define GPMC_GET_TICKS(reg, st, end, field) \ - get_gpmc_timing_reg(cs, (reg), (st), (end), 0, 0, 0, field) - -static void gpmc_show_regs(int cs, const char *desc) -{ - pr_info("gpmc cs%i %s:\n", cs, desc); - GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1); - GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2); - GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3); - GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4); - GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5); - GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6); -} - -/* - * Note that gpmc,wait-pin handing wrongly assumes bit 8 is available, - * see commit c9fb809. - */ -static void gpmc_cs_show_timings(int cs, const char *desc) -{ - gpmc_show_regs(cs, desc); - - pr_info("gpmc cs%i access configuration:\n", cs); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity"); - GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data"); - GPMC_GET_RAW(GPMC_CS_CONFIG1, 12, 13, "device-width"); - GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read"); - GPMC_GET_RAW_SHIFT(GPMC_CS_CONFIG1, 23, 24, 4, "burst-length"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "gpmc,sync-read"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 30, 30, "burst-read"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 31, 31, "burst-wrap"); - - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG2, 7, 7, "cs-extra-delay"); - - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG3, 7, 7, "adv-extra-delay"); - - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 23, 23, "we-extra-delay"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 7, 7, "oe-extra-delay"); - - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 7, 7, "cycle2cycle-samecsen"); - GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 6, 6, "cycle2cycle-diffcsen"); - - pr_info("gpmc cs%i timings configuration:\n", cs); - GPMC_GET_TICKS(GPMC_CS_CONFIG2, 0, 3, "cs-on-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG2, 8, 12, "cs-rd-off-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG2, 16, 20, "cs-wr-off-ns"); - - GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns"); - - GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns"); - - GPMC_GET_TICKS(GPMC_CS_CONFIG5, 0, 4, "rd-cycle-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG5, 8, 12, "wr-cycle-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG5, 16, 20, "access-ns"); - - GPMC_GET_TICKS(GPMC_CS_CONFIG5, 24, 27, "page-burst-access-ns"); - - GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns"); - - GPMC_GET_TICKS(GPMC_CS_CONFIG1, 18, 19, 
"wait-monitoring-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG1, 25, 26, "clk-activation-ns"); - - GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns"); - GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns"); -} -#else -static inline void gpmc_cs_show_timings(int cs, const char *desc) -{ -} -#endif - -static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, - int time, const char *name) -{ - u32 l; - int ticks, mask, nr_bits; - - if (time == 0) - ticks = 0; - else - ticks = gpmc_ns_to_ticks(time); - nr_bits = end_bit - st_bit + 1; - mask = (1 << nr_bits) - 1; - - if (ticks > mask) { - pr_err("%s: GPMC error! CS%d: %s: %d ns, %d ticks > %d\n", - __func__, cs, name, time, ticks, mask); - - return -1; - } - - l = gpmc_cs_read_reg(cs, reg); -#ifdef DEBUG - printk(KERN_INFO - "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n", - cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000, - (l >> st_bit) & mask, time); -#endif - l &= ~(mask << st_bit); - l |= ticks << st_bit; - gpmc_cs_write_reg(cs, reg, l); - - return 0; -} - -#define GPMC_SET_ONE(reg, st, end, field) \ - if (set_gpmc_timing_reg(cs, (reg), (st), (end), \ - t->field, #field) < 0) \ - return -1 - -int gpmc_calc_divider(unsigned int sync_clk) -{ - int div; - u32 l; - - l = sync_clk + (gpmc_get_fclk_period() - 1); - div = l / gpmc_get_fclk_period(); - if (div > 4) - return -1; - if (div <= 0) - div = 1; - - return div; -} - -int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t) -{ - int div; - u32 l; - - gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings"); - div = gpmc_calc_divider(t->sync_clk); - if (div < 0) - return div; - - GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on); - GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off); - GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off); - - GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on); - GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off); - GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off); - - GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on); - GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off); - GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on); - GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off); - - GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle); - GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle); - GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access); - - GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access); - - GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround); - GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay); - - GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring); - GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation); - - if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) - GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus); - if (gpmc_capability & GPMC_HAS_WR_ACCESS) - GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access); - - /* caller is expected to have initialized CONFIG1 to cover - * at least sync vs async - */ - l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); - if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) { -#ifdef DEBUG - printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n", - cs, (div * gpmc_get_fclk_period()) / 1000, div); -#endif - l &= ~0x03; - l |= (div - 1); - gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l); - } - - gpmc_cs_bool_timings(cs, &t->bool_timings); - gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings"); - - return 0; -} - -static int gpmc_cs_set_memconf(int cs, u32 base, u32 size) -{ - u32 l; - u32 mask; - - /* - * Ensure that base address is aligned on a - * boundary equal to or greater than size. 
- */ - if (base & (size - 1)) - return -EINVAL; - - mask = (1 << GPMC_SECTION_SHIFT) - size; - l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); - l &= ~0x3f; - l = (base >> GPMC_CHUNK_SHIFT) & 0x3f; - l &= ~(0x0f << 8); - l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8; - l |= GPMC_CONFIG7_CSVALID; - gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); - - return 0; -} - -static void gpmc_cs_enable_mem(int cs) -{ - u32 l; - - l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); - l |= GPMC_CONFIG7_CSVALID; - gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); -} - -static void gpmc_cs_disable_mem(int cs) -{ - u32 l; - - l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); - l &= ~GPMC_CONFIG7_CSVALID; - gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); -} - -static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size) -{ - u32 l; - u32 mask; - - l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); - *base = (l & 0x3f) << GPMC_CHUNK_SHIFT; - mask = (l >> 8) & 0x0f; - *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT); -} - -static int gpmc_cs_mem_enabled(int cs) -{ - u32 l; - - l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); - return l & GPMC_CONFIG7_CSVALID; -} - -static void gpmc_cs_set_reserved(int cs, int reserved) -{ - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - - gpmc->flags |= GPMC_CS_RESERVED; -} - -static bool gpmc_cs_reserved(int cs) -{ - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - - return gpmc->flags & GPMC_CS_RESERVED; -} - -static void gpmc_cs_set_name(int cs, const char *name) -{ - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - - gpmc->name = name; -} - -const char *gpmc_cs_get_name(int cs) -{ - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - - return gpmc->name; -} - -static unsigned long gpmc_mem_align(unsigned long size) -{ - int order; - - size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1); - order = GPMC_CHUNK_SHIFT - 1; - do { - size >>= 1; - order++; - } while (size); - size = 1 << order; - return size; -} - -static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size) -{ - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - struct resource *res = &gpmc->mem; - int r; - - size = gpmc_mem_align(size); - spin_lock(&gpmc_mem_lock); - res->start = base; - res->end = base + size - 1; - r = request_resource(&gpmc_mem_root, res); - spin_unlock(&gpmc_mem_lock); - - return r; -} - -static int gpmc_cs_delete_mem(int cs) -{ - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - struct resource *res = &gpmc->mem; - int r; - - spin_lock(&gpmc_mem_lock); - r = release_resource(res); - res->start = 0; - res->end = 0; - spin_unlock(&gpmc_mem_lock); - - return r; -} - -/** - * gpmc_cs_remap - remaps a chip-select physical base address - * @cs: chip-select to remap - * @base: physical base address to re-map chip-select to - * - * Re-maps a chip-select to a new physical base address specified by - * "base". Returns 0 on success and appropriate negative error code - * on failure. - */ -static int gpmc_cs_remap(int cs, u32 base) -{ - int ret; - u32 old_base, size; - - if (cs > gpmc_cs_num) { - pr_err("%s: requested chip-select is disabled\n", __func__); - return -ENODEV; - } - - /* - * Make sure we ignore any device offsets from the GPMC partition - * allocated for the chip select and that the new base confirms - * to the GPMC 16MB minimum granularity. 
- */ - base &= ~(SZ_16M - 1); - - gpmc_cs_get_memconf(cs, &old_base, &size); - if (base == old_base) - return 0; - - ret = gpmc_cs_delete_mem(cs); - if (ret < 0) - return ret; - - ret = gpmc_cs_insert_mem(cs, base, size); - if (ret < 0) - return ret; - - ret = gpmc_cs_set_memconf(cs, base, size); - - return ret; -} - -int gpmc_cs_request(int cs, unsigned long size, unsigned long *base) -{ - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - struct resource *res = &gpmc->mem; - int r = -1; - - if (cs > gpmc_cs_num) { - pr_err("%s: requested chip-select is disabled\n", __func__); - return -ENODEV; - } - size = gpmc_mem_align(size); - if (size > (1 << GPMC_SECTION_SHIFT)) - return -ENOMEM; - - spin_lock(&gpmc_mem_lock); - if (gpmc_cs_reserved(cs)) { - r = -EBUSY; - goto out; - } - if (gpmc_cs_mem_enabled(cs)) - r = adjust_resource(res, res->start & ~(size - 1), size); - if (r < 0) - r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0, - size, NULL, NULL); - if (r < 0) - goto out; - - /* Disable CS while changing base address and size mask */ - gpmc_cs_disable_mem(cs); - - r = gpmc_cs_set_memconf(cs, res->start, resource_size(res)); - if (r < 0) { - release_resource(res); - goto out; - } - - /* Enable CS */ - gpmc_cs_enable_mem(cs); - *base = res->start; - gpmc_cs_set_reserved(cs, 1); -out: - spin_unlock(&gpmc_mem_lock); - return r; -} -EXPORT_SYMBOL(gpmc_cs_request); - -void gpmc_cs_free(int cs) -{ - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - struct resource *res = &gpmc->mem; - - spin_lock(&gpmc_mem_lock); - if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) { - printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs); - BUG(); - spin_unlock(&gpmc_mem_lock); - return; - } - gpmc_cs_disable_mem(cs); - if (res->flags) - release_resource(res); - gpmc_cs_set_reserved(cs, 0); - spin_unlock(&gpmc_mem_lock); -} -EXPORT_SYMBOL(gpmc_cs_free); - -/** - * gpmc_configure - write request to configure gpmc - * @cmd: command type - * @wval: value to write - * @return status of the operation - */ -int gpmc_configure(int cmd, int wval) -{ - u32 regval; - - switch (cmd) { - case GPMC_ENABLE_IRQ: - gpmc_write_reg(GPMC_IRQENABLE, wval); - break; - - case GPMC_SET_IRQ_STATUS: - gpmc_write_reg(GPMC_IRQSTATUS, wval); - break; - - case GPMC_CONFIG_WP: - regval = gpmc_read_reg(GPMC_CONFIG); - if (wval) - regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */ - else - regval |= GPMC_CONFIG_WRITEPROTECT; /* WP is OFF */ - gpmc_write_reg(GPMC_CONFIG, regval); - break; - - default: - pr_err("%s: command not supported\n", __func__); - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL(gpmc_configure); - -void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) -{ - int i; - - reg->gpmc_status = gpmc_base + GPMC_STATUS; - reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET + - GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs; - reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET + - GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs; - reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET + - GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs; - reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1; - reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2; - reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL; - reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS; - reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG; - reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL; - reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG; - reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT; - - for (i = 0; 
i < GPMC_BCH_NUM_REMAINDER; i++) { - reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 + - GPMC_BCH_SIZE * i; - reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 + - GPMC_BCH_SIZE * i; - reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 + - GPMC_BCH_SIZE * i; - reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 + - GPMC_BCH_SIZE * i; - reg->gpmc_bch_result4[i] = gpmc_base + GPMC_ECC_BCH_RESULT_4 + - i * GPMC_BCH_SIZE; - reg->gpmc_bch_result5[i] = gpmc_base + GPMC_ECC_BCH_RESULT_5 + - i * GPMC_BCH_SIZE; - reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 + - i * GPMC_BCH_SIZE; - } -} - -int gpmc_get_client_irq(unsigned irq_config) -{ - int i; - - if (hweight32(irq_config) > 1) - return 0; - - for (i = 0; i < GPMC_NR_IRQ; i++) - if (gpmc_client_irq[i].bitmask & irq_config) - return gpmc_client_irq[i].irq; - - return 0; -} - -static int gpmc_irq_endis(unsigned irq, bool endis) -{ - int i; - u32 regval; - - for (i = 0; i < GPMC_NR_IRQ; i++) - if (irq == gpmc_client_irq[i].irq) { - regval = gpmc_read_reg(GPMC_IRQENABLE); - if (endis) - regval |= gpmc_client_irq[i].bitmask; - else - regval &= ~gpmc_client_irq[i].bitmask; - gpmc_write_reg(GPMC_IRQENABLE, regval); - break; - } - - return 0; -} - -static void gpmc_irq_disable(struct irq_data *p) -{ - gpmc_irq_endis(p->irq, false); -} - -static void gpmc_irq_enable(struct irq_data *p) -{ - gpmc_irq_endis(p->irq, true); -} - -static void gpmc_irq_noop(struct irq_data *data) { } - -static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } - -static int gpmc_setup_irq(void) -{ - int i; - u32 regval; - - if (!gpmc_irq) - return -EINVAL; - - gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); - if (gpmc_irq_start < 0) { - pr_err("irq_alloc_descs failed\n"); - return gpmc_irq_start; - } - - gpmc_irq_chip.name = "gpmc"; - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; - gpmc_irq_chip.irq_enable = gpmc_irq_enable; - gpmc_irq_chip.irq_disable = gpmc_irq_disable; - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; - gpmc_irq_chip.irq_ack = gpmc_irq_noop; - gpmc_irq_chip.irq_mask = gpmc_irq_noop; - gpmc_irq_chip.irq_unmask = gpmc_irq_noop; - - gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; - gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; - - for (i = 0; i < GPMC_NR_IRQ; i++) { - gpmc_client_irq[i].irq = gpmc_irq_start + i; - irq_set_chip_and_handler(gpmc_client_irq[i].irq, - &gpmc_irq_chip, handle_simple_irq); - set_irq_flags(gpmc_client_irq[i].irq, - IRQF_VALID | IRQF_NOAUTOEN); - } - - /* Disable interrupts */ - gpmc_write_reg(GPMC_IRQENABLE, 0); - - /* clear interrupts */ - regval = gpmc_read_reg(GPMC_IRQSTATUS); - gpmc_write_reg(GPMC_IRQSTATUS, regval); - - return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL); -} - -static int gpmc_free_irq(void) -{ - int i; - - if (gpmc_irq) - free_irq(gpmc_irq, NULL); - - for (i = 0; i < GPMC_NR_IRQ; i++) { - irq_set_handler(gpmc_client_irq[i].irq, NULL); - irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip); - irq_modify_status(gpmc_client_irq[i].irq, 0, 0); - } - - irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ); - - return 0; -} - -static void gpmc_mem_exit(void) -{ - int cs; - - for (cs = 0; cs < gpmc_cs_num; cs++) { - if (!gpmc_cs_mem_enabled(cs)) - continue; - gpmc_cs_delete_mem(cs); - } - -} - -static void gpmc_mem_init(void) -{ - int cs; - - /* - * The first 1MB of GPMC address space is typically mapped to - * the internal ROM. Never allocate the first page, to - * facilitate bug detection; even if we didn't boot from ROM. 
- */ - gpmc_mem_root.start = SZ_1M; - gpmc_mem_root.end = GPMC_MEM_END; - - /* Reserve all regions that has been set up by bootloader */ - for (cs = 0; cs < gpmc_cs_num; cs++) { - u32 base, size; - - if (!gpmc_cs_mem_enabled(cs)) - continue; - gpmc_cs_get_memconf(cs, &base, &size); - if (gpmc_cs_insert_mem(cs, base, size)) { - pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n", - __func__, cs, base, base + size); - gpmc_cs_disable_mem(cs); - } - } -} - -static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk) -{ - u32 temp; - int div; - - div = gpmc_calc_divider(sync_clk); - temp = gpmc_ps_to_ticks(time_ps); - temp = (temp + div - 1) / div; - return gpmc_ticks_to_ps(temp * div); -} - -/* XXX: can the cycles be avoided ? */ -static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t, - struct gpmc_device_timings *dev_t, - bool mux) -{ - u32 temp; - - /* adv_rd_off */ - temp = dev_t->t_avdp_r; - /* XXX: mux check required ? */ - if (mux) { - /* XXX: t_avdp not to be required for sync, only added for tusb - * this indirectly necessitates requirement of t_avdp_r and - * t_avdp_w instead of having a single t_avdp - */ - temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh); - temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); - } - gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp); - - /* oe_on */ - temp = dev_t->t_oeasu; /* XXX: remove this ? */ - if (mux) { - temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach); - temp = max_t(u32, temp, gpmc_t->adv_rd_off + - gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe)); - } - gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp); - - /* access */ - /* XXX: any scope for improvement ?, by combining oe_on - * and clk_activation, need to check whether - * access = clk_activation + round to sync clk ? - */ - temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk); - temp += gpmc_t->clk_activation; - if (dev_t->cyc_oe) - temp = max_t(u32, temp, gpmc_t->oe_on + - gpmc_ticks_to_ps(dev_t->cyc_oe)); - gpmc_t->access = gpmc_round_ps_to_ticks(temp); - - gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1); - gpmc_t->cs_rd_off = gpmc_t->oe_off; - - /* rd_cycle */ - temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez); - temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) + - gpmc_t->access; - /* XXX: barter t_ce_rdyz with t_cez_r ? 
*/ - if (dev_t->t_ce_rdyz) - temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz); - gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp); - - return 0; -} - -static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t, - struct gpmc_device_timings *dev_t, - bool mux) -{ - u32 temp; - - /* adv_wr_off */ - temp = dev_t->t_avdp_w; - if (mux) { - temp = max_t(u32, temp, - gpmc_t->clk_activation + dev_t->t_avdh); - temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); - } - gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp); - - /* wr_data_mux_bus */ - temp = max_t(u32, dev_t->t_weasu, - gpmc_t->clk_activation + dev_t->t_rdyo); - /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?, - * and in that case remember to handle we_on properly - */ - if (mux) { - temp = max_t(u32, temp, - gpmc_t->adv_wr_off + dev_t->t_aavdh); - temp = max_t(u32, temp, gpmc_t->adv_wr_off + - gpmc_ticks_to_ps(dev_t->cyc_aavdh_we)); - } - gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp); - - /* we_on */ - if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) - gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu); - else - gpmc_t->we_on = gpmc_t->wr_data_mux_bus; - - /* wr_access */ - /* XXX: gpmc_capability check reqd ? , even if not, will not harm */ - gpmc_t->wr_access = gpmc_t->access; - - /* we_off */ - temp = gpmc_t->we_on + dev_t->t_wpl; - temp = max_t(u32, temp, - gpmc_t->wr_access + gpmc_ticks_to_ps(1)); - temp = max_t(u32, temp, - gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl)); - gpmc_t->we_off = gpmc_round_ps_to_ticks(temp); - - gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off + - dev_t->t_wph); - - /* wr_cycle */ - temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk); - temp += gpmc_t->wr_access; - /* XXX: barter t_ce_rdyz with t_cez_w ? */ - if (dev_t->t_ce_rdyz) - temp = max_t(u32, temp, - gpmc_t->cs_wr_off + dev_t->t_ce_rdyz); - gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp); - - return 0; -} - -static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t, - struct gpmc_device_timings *dev_t, - bool mux) -{ - u32 temp; - - /* adv_rd_off */ - temp = dev_t->t_avdp_r; - if (mux) - temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); - gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp); - - /* oe_on */ - temp = dev_t->t_oeasu; - if (mux) - temp = max_t(u32, temp, - gpmc_t->adv_rd_off + dev_t->t_aavdh); - gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp); - - /* access */ - temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? 
*/ - gpmc_t->oe_on + dev_t->t_oe); - temp = max_t(u32, temp, - gpmc_t->cs_on + dev_t->t_ce); - temp = max_t(u32, temp, - gpmc_t->adv_on + dev_t->t_aa); - gpmc_t->access = gpmc_round_ps_to_ticks(temp); - - gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1); - gpmc_t->cs_rd_off = gpmc_t->oe_off; - - /* rd_cycle */ - temp = max_t(u32, dev_t->t_rd_cycle, - gpmc_t->cs_rd_off + dev_t->t_cez_r); - temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez); - gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp); - - return 0; -} - -static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t, - struct gpmc_device_timings *dev_t, - bool mux) -{ - u32 temp; - - /* adv_wr_off */ - temp = dev_t->t_avdp_w; - if (mux) - temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); - gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp); - - /* wr_data_mux_bus */ - temp = dev_t->t_weasu; - if (mux) { - temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh); - temp = max_t(u32, temp, gpmc_t->adv_wr_off + - gpmc_ticks_to_ps(dev_t->cyc_aavdh_we)); - } - gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp); - - /* we_on */ - if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) - gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu); - else - gpmc_t->we_on = gpmc_t->wr_data_mux_bus; - - /* we_off */ - temp = gpmc_t->we_on + dev_t->t_wpl; - gpmc_t->we_off = gpmc_round_ps_to_ticks(temp); - - gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off + - dev_t->t_wph); - - /* wr_cycle */ - temp = max_t(u32, dev_t->t_wr_cycle, - gpmc_t->cs_wr_off + dev_t->t_cez_w); - gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp); - - return 0; -} - -static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t, - struct gpmc_device_timings *dev_t) -{ - u32 temp; - - gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) * - gpmc_get_fclk_period(); - - gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk( - dev_t->t_bacc, - gpmc_t->sync_clk); - - temp = max_t(u32, dev_t->t_ces, dev_t->t_avds); - gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp); - - if (gpmc_calc_divider(gpmc_t->sync_clk) != 1) - return 0; - - if (dev_t->ce_xdelay) - gpmc_t->bool_timings.cs_extra_delay = true; - if (dev_t->avd_xdelay) - gpmc_t->bool_timings.adv_extra_delay = true; - if (dev_t->oe_xdelay) - gpmc_t->bool_timings.oe_extra_delay = true; - if (dev_t->we_xdelay) - gpmc_t->bool_timings.we_extra_delay = true; - - return 0; -} - -static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t, - struct gpmc_device_timings *dev_t, - bool sync) -{ - u32 temp; - - /* cs_on */ - gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu); - - /* adv_on */ - temp = dev_t->t_avdasu; - if (dev_t->t_ce_avd) - temp = max_t(u32, temp, - gpmc_t->cs_on + dev_t->t_ce_avd); - gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp); - - if (sync) - gpmc_calc_sync_common_timings(gpmc_t, dev_t); - - return 0; -} - -/* TODO: remove this function once all peripherals are confirmed to - * work with generic timing. 
Simultaneously gpmc_cs_set_timings() - * has to be modified to handle timings in ps instead of ns -*/ -static void gpmc_convert_ps_to_ns(struct gpmc_timings *t) -{ - t->cs_on /= 1000; - t->cs_rd_off /= 1000; - t->cs_wr_off /= 1000; - t->adv_on /= 1000; - t->adv_rd_off /= 1000; - t->adv_wr_off /= 1000; - t->we_on /= 1000; - t->we_off /= 1000; - t->oe_on /= 1000; - t->oe_off /= 1000; - t->page_burst_access /= 1000; - t->access /= 1000; - t->rd_cycle /= 1000; - t->wr_cycle /= 1000; - t->bus_turnaround /= 1000; - t->cycle2cycle_delay /= 1000; - t->wait_monitoring /= 1000; - t->clk_activation /= 1000; - t->wr_access /= 1000; - t->wr_data_mux_bus /= 1000; -} - -int gpmc_calc_timings(struct gpmc_timings *gpmc_t, - struct gpmc_settings *gpmc_s, - struct gpmc_device_timings *dev_t) -{ - bool mux = false, sync = false; - - if (gpmc_s) { - mux = gpmc_s->mux_add_data ? true : false; - sync = (gpmc_s->sync_read || gpmc_s->sync_write); - } - - memset(gpmc_t, 0, sizeof(*gpmc_t)); - - gpmc_calc_common_timings(gpmc_t, dev_t, sync); - - if (gpmc_s && gpmc_s->sync_read) - gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux); - else - gpmc_calc_async_read_timings(gpmc_t, dev_t, mux); - - if (gpmc_s && gpmc_s->sync_write) - gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux); - else - gpmc_calc_async_write_timings(gpmc_t, dev_t, mux); - - /* TODO: remove, see function definition */ - gpmc_convert_ps_to_ns(gpmc_t); - - return 0; -} - -/** - * gpmc_cs_program_settings - programs non-timing related settings - * @cs: GPMC chip-select to program - * @p: pointer to GPMC settings structure - * - * Programs non-timing related settings for a GPMC chip-select, such as - * bus-width, burst configuration, etc. Function should be called once - * for each chip-select that is being used and must be called before - * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1 - * register will be initialised to zero by this function. Returns 0 on - * success and appropriate negative error code on failure. 
- */ -int gpmc_cs_program_settings(int cs, struct gpmc_settings *p) -{ - u32 config1; - - if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) { - pr_err("%s: invalid width %d!", __func__, p->device_width); - return -EINVAL; - } - - /* Address-data multiplexing not supported for NAND devices */ - if (p->device_nand && p->mux_add_data) { - pr_err("%s: invalid configuration!\n", __func__); - return -EINVAL; - } - - if ((p->mux_add_data > GPMC_MUX_AD) || - ((p->mux_add_data == GPMC_MUX_AAD) && - !(gpmc_capability & GPMC_HAS_MUX_AAD))) { - pr_err("%s: invalid multiplex configuration!\n", __func__); - return -EINVAL; - } - - /* Page/burst mode supports lengths of 4, 8 and 16 bytes */ - if (p->burst_read || p->burst_write) { - switch (p->burst_len) { - case GPMC_BURST_4: - case GPMC_BURST_8: - case GPMC_BURST_16: - break; - default: - pr_err("%s: invalid page/burst-length (%d)\n", - __func__, p->burst_len); - return -EINVAL; - } - } - - if (p->wait_pin > gpmc_nr_waitpins) { - pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin); - return -EINVAL; - } - - config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1)); - - if (p->sync_read) - config1 |= GPMC_CONFIG1_READTYPE_SYNC; - if (p->sync_write) - config1 |= GPMC_CONFIG1_WRITETYPE_SYNC; - if (p->wait_on_read) - config1 |= GPMC_CONFIG1_WAIT_READ_MON; - if (p->wait_on_write) - config1 |= GPMC_CONFIG1_WAIT_WRITE_MON; - if (p->wait_on_read || p->wait_on_write) - config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin); - if (p->device_nand) - config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND); - if (p->mux_add_data) - config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data); - if (p->burst_read) - config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP; - if (p->burst_write) - config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP; - if (p->burst_read || p->burst_write) { - config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3); - config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0; - } - - gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1); - - return 0; -} - -#ifdef CONFIG_OF -static const struct of_device_id gpmc_dt_ids[] = { - { .compatible = "ti,omap2420-gpmc" }, - { .compatible = "ti,omap2430-gpmc" }, - { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */ - { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */ - { .compatible = "ti,am3352-gpmc" }, /* am335x devices */ - { } -}; -MODULE_DEVICE_TABLE(of, gpmc_dt_ids); - -/** - * gpmc_read_settings_dt - read gpmc settings from device-tree - * @np: pointer to device-tree node for a gpmc child device - * @p: pointer to gpmc settings structure - * - * Reads the GPMC settings for a GPMC child device from device-tree and - * stores them in the GPMC settings structure passed. The GPMC settings - * structure is initialised to zero by this function and so any - * previously stored settings will be cleared. 
- */ -void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p) -{ - memset(p, 0, sizeof(struct gpmc_settings)); - - p->sync_read = of_property_read_bool(np, "gpmc,sync-read"); - p->sync_write = of_property_read_bool(np, "gpmc,sync-write"); - of_property_read_u32(np, "gpmc,device-width", &p->device_width); - of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data); - - if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) { - p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap"); - p->burst_read = of_property_read_bool(np, "gpmc,burst-read"); - p->burst_write = of_property_read_bool(np, "gpmc,burst-write"); - if (!p->burst_read && !p->burst_write) - pr_warn("%s: page/burst-length set but not used!\n", - __func__); - } - - if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) { - p->wait_on_read = of_property_read_bool(np, - "gpmc,wait-on-read"); - p->wait_on_write = of_property_read_bool(np, - "gpmc,wait-on-write"); - if (!p->wait_on_read && !p->wait_on_write) - pr_debug("%s: rd/wr wait monitoring not enabled!\n", - __func__); - } -} - -static void __maybe_unused gpmc_read_timings_dt(struct device_node *np, - struct gpmc_timings *gpmc_t) -{ - struct gpmc_bool_timings *p; - - if (!np || !gpmc_t) - return; - - memset(gpmc_t, 0, sizeof(*gpmc_t)); - - /* minimum clock period for syncronous mode */ - of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk); - - /* chip select timtings */ - of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on); - of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off); - of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off); - - /* ADV signal timings */ - of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on); - of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off); - of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off); - - /* WE signal timings */ - of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on); - of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off); - - /* OE signal timings */ - of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on); - of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off); - - /* access and cycle timings */ - of_property_read_u32(np, "gpmc,page-burst-access-ns", - &gpmc_t->page_burst_access); - of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access); - of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle); - of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle); - of_property_read_u32(np, "gpmc,bus-turnaround-ns", - &gpmc_t->bus_turnaround); - of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns", - &gpmc_t->cycle2cycle_delay); - of_property_read_u32(np, "gpmc,wait-monitoring-ns", - &gpmc_t->wait_monitoring); - of_property_read_u32(np, "gpmc,clk-activation-ns", - &gpmc_t->clk_activation); - - /* only applicable to OMAP3+ */ - of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access); - of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns", - &gpmc_t->wr_data_mux_bus); - - /* bool timing parameters */ - p = &gpmc_t->bool_timings; - - p->cycle2cyclediffcsen = - of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen"); - p->cycle2cyclesamecsen = - of_property_read_bool(np, "gpmc,cycle2cycle-samecsen"); - p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay"); - p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay"); - p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay"); - p->cs_extra_delay = of_property_read_bool(np, 
"gpmc,cs-extra-delay"); - p->time_para_granularity = - of_property_read_bool(np, "gpmc,time-para-granularity"); -} - -#if IS_ENABLED(CONFIG_MTD_NAND) - -static const char * const nand_xfer_types[] = { - [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", - [NAND_OMAP_POLLED] = "polled", - [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma", - [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq", -}; - -static int gpmc_probe_nand_child(struct platform_device *pdev, - struct device_node *child) -{ - u32 val; - const char *s; - struct gpmc_timings gpmc_t; - struct omap_nand_platform_data *gpmc_nand_data; - - if (of_property_read_u32(child, "reg", &val) < 0) { - dev_err(&pdev->dev, "%s has no 'reg' property\n", - child->full_name); - return -ENODEV; - } - - gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data), - GFP_KERNEL); - if (!gpmc_nand_data) - return -ENOMEM; - - gpmc_nand_data->cs = val; - gpmc_nand_data->of_node = child; - - /* Detect availability of ELM module */ - gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0); - if (gpmc_nand_data->elm_of_node == NULL) - gpmc_nand_data->elm_of_node = - of_parse_phandle(child, "elm_id", 0); - if (gpmc_nand_data->elm_of_node == NULL) - pr_warn("%s: ti,elm-id property not found\n", __func__); - - /* select ecc-scheme for NAND */ - if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) { - pr_err("%s: ti,nand-ecc-opt not found\n", __func__); - return -ENODEV; - } - - if (!strcmp(s, "sw")) - gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW; - else if (!strcmp(s, "ham1") || - !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) - gpmc_nand_data->ecc_opt = - OMAP_ECC_HAM1_CODE_HW; - else if (!strcmp(s, "bch4")) - if (gpmc_nand_data->elm_of_node) - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH4_CODE_HW; - else - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH4_CODE_HW_DETECTION_SW; - else if (!strcmp(s, "bch8")) - if (gpmc_nand_data->elm_of_node) - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH8_CODE_HW; - else - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH8_CODE_HW_DETECTION_SW; - else if (!strcmp(s, "bch16")) - if (gpmc_nand_data->elm_of_node) - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH16_CODE_HW; - else - pr_err("%s: BCH16 requires ELM support\n", __func__); - else - pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__); - - /* select data transfer mode for NAND controller */ - if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) - for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++) - if (!strcasecmp(s, nand_xfer_types[val])) { - gpmc_nand_data->xfer_type = val; - break; - } - - gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child); - - val = of_get_nand_bus_width(child); - if (val == 16) - gpmc_nand_data->devsize = NAND_BUSWIDTH_16; - - gpmc_read_timings_dt(child, &gpmc_t); - gpmc_nand_init(gpmc_nand_data, &gpmc_t); - - return 0; -} -#else -static int gpmc_probe_nand_child(struct platform_device *pdev, - struct device_node *child) -{ - return 0; -} -#endif - -#if IS_ENABLED(CONFIG_MTD_ONENAND) -static int gpmc_probe_onenand_child(struct platform_device *pdev, - struct device_node *child) -{ - u32 val; - struct omap_onenand_platform_data *gpmc_onenand_data; - - if (of_property_read_u32(child, "reg", &val) < 0) { - dev_err(&pdev->dev, "%s has no 'reg' property\n", - child->full_name); - return -ENODEV; - } - - gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data), - GFP_KERNEL); - if (!gpmc_onenand_data) - return -ENOMEM; - - gpmc_onenand_data->cs = val; - gpmc_onenand_data->of_node = child; - gpmc_onenand_data->dma_channel = -1; - 
- if (!of_property_read_u32(child, "dma-channel", &val)) - gpmc_onenand_data->dma_channel = val; - - gpmc_onenand_init(gpmc_onenand_data); - - return 0; -} -#else -static int gpmc_probe_onenand_child(struct platform_device *pdev, - struct device_node *child) -{ - return 0; -} -#endif - -/** - * gpmc_probe_generic_child - configures the gpmc for a child device - * @pdev: pointer to gpmc platform device - * @child: pointer to device-tree node for child device - * - * Allocates and configures a GPMC chip-select for a child device. - * Returns 0 on success and appropriate negative error code on failure. - */ -static int gpmc_probe_generic_child(struct platform_device *pdev, - struct device_node *child) -{ - struct gpmc_settings gpmc_s; - struct gpmc_timings gpmc_t; - struct resource res; - unsigned long base; - const char *name; - int ret, cs; - u32 val; - - if (of_property_read_u32(child, "reg", &cs) < 0) { - dev_err(&pdev->dev, "%s has no 'reg' property\n", - child->full_name); - return -ENODEV; - } - - if (of_address_to_resource(child, 0, &res) < 0) { - dev_err(&pdev->dev, "%s has malformed 'reg' property\n", - child->full_name); - return -ENODEV; - } - - /* - * Check if we have multiple instances of the same device - * on a single chip select. If so, use the already initialized - * timings. - */ - name = gpmc_cs_get_name(cs); - if (name && child->name && of_node_cmp(child->name, name) == 0) - goto no_timings; - - ret = gpmc_cs_request(cs, resource_size(&res), &base); - if (ret < 0) { - dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs); - return ret; - } - gpmc_cs_set_name(cs, child->name); - - gpmc_read_settings_dt(child, &gpmc_s); - gpmc_read_timings_dt(child, &gpmc_t); - - /* - * For some GPMC devices we still need to rely on the bootloader - * timings because the devices can be connected via FPGA. - * REVISIT: Add timing support from slls644g.pdf. - */ - if (!gpmc_t.cs_rd_off) { - WARN(1, "enable GPMC debug to configure .dts timings for CS%i\n", - cs); - gpmc_cs_show_timings(cs, - "please add GPMC bootloader timings to .dts"); - goto no_timings; - } - - /* CS must be disabled while making changes to gpmc configuration */ - gpmc_cs_disable_mem(cs); - - /* - * FIXME: gpmc_cs_request() will map the CS to an arbitary - * location in the gpmc address space. When booting with - * device-tree we want the NOR flash to be mapped to the - * location specified in the device-tree blob. So remap the - * CS to this location. Once DT migration is complete should - * just make gpmc_cs_request() map a specific address. - */ - ret = gpmc_cs_remap(cs, res.start); - if (ret < 0) { - dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n", - cs, &res.start); - goto err; - } - - ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width); - if (ret < 0) - goto err; - - ret = gpmc_cs_program_settings(cs, &gpmc_s); - if (ret < 0) - goto err; - - ret = gpmc_cs_set_timings(cs, &gpmc_t); - if (ret) { - dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n", - child->name); - goto err; - } - - /* Clear limited address i.e. 
enable A26-A11 */ - val = gpmc_read_reg(GPMC_CONFIG); - val &= ~GPMC_CONFIG_LIMITEDADDRESS; - gpmc_write_reg(GPMC_CONFIG, val); - - /* Enable CS region */ - gpmc_cs_enable_mem(cs); - -no_timings: - if (of_platform_device_create(child, NULL, &pdev->dev)) - return 0; - - dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name); - ret = -ENODEV; - -err: - gpmc_cs_free(cs); - - return ret; -} - -static int gpmc_probe_dt(struct platform_device *pdev) -{ - int ret; - struct device_node *child; - const struct of_device_id *of_id = - of_match_device(gpmc_dt_ids, &pdev->dev); - - if (!of_id) - return 0; - - ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-cs", - &gpmc_cs_num); - if (ret < 0) { - pr_err("%s: number of chip-selects not defined\n", __func__); - return ret; - } else if (gpmc_cs_num < 1) { - pr_err("%s: all chip-selects are disabled\n", __func__); - return -EINVAL; - } else if (gpmc_cs_num > GPMC_CS_NUM) { - pr_err("%s: number of supported chip-selects cannot be > %d\n", - __func__, GPMC_CS_NUM); - return -EINVAL; - } - - ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins", - &gpmc_nr_waitpins); - if (ret < 0) { - pr_err("%s: number of wait pins not found!\n", __func__); - return ret; - } - - for_each_available_child_of_node(pdev->dev.of_node, child) { - - if (!child->name) - continue; - - if (of_node_cmp(child->name, "nand") == 0) - ret = gpmc_probe_nand_child(pdev, child); - else if (of_node_cmp(child->name, "onenand") == 0) - ret = gpmc_probe_onenand_child(pdev, child); - else if (of_node_cmp(child->name, "ethernet") == 0 || - of_node_cmp(child->name, "nor") == 0 || - of_node_cmp(child->name, "uart") == 0) - ret = gpmc_probe_generic_child(pdev, child); - - if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", - __func__, child->full_name)) - of_node_put(child); - } - - return 0; -} -#else -static int gpmc_probe_dt(struct platform_device *pdev) -{ - return 0; -} -#endif - -static int gpmc_probe(struct platform_device *pdev) -{ - int rc; - u32 l; - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) - return -ENOENT; - - phys_base = res->start; - mem_size = resource_size(res); - - gpmc_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(gpmc_base)) - return PTR_ERR(gpmc_base); - - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res == NULL) - dev_warn(&pdev->dev, "Failed to get resource: irq\n"); - else - gpmc_irq = res->start; - - gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck"); - if (IS_ERR(gpmc_l3_clk)) { - dev_err(&pdev->dev, "Failed to get GPMC fck\n"); - gpmc_irq = 0; - return PTR_ERR(gpmc_l3_clk); - } - - if (!clk_get_rate(gpmc_l3_clk)) { - dev_err(&pdev->dev, "Invalid GPMC fck clock rate\n"); - return -EINVAL; - } - - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); - - gpmc_dev = &pdev->dev; - - l = gpmc_read_reg(GPMC_REVISION); - - /* - * FIXME: Once device-tree migration is complete the below flags - * should be populated based upon the device-tree compatible - * string. For now just use the IP revision. OMAP3+ devices have - * the wr_access and wr_data_mux_bus register fields. OMAP4+ - * devices support the addr-addr-data multiplex protocol. 
- * - * GPMC IP revisions: - * - OMAP24xx = 2.0 - * - OMAP3xxx = 5.0 - * - OMAP44xx/54xx/AM335x = 6.0 - */ - if (GPMC_REVISION_MAJOR(l) > 0x4) - gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS; - if (GPMC_REVISION_MAJOR(l) > 0x5) - gpmc_capability |= GPMC_HAS_MUX_AAD; - dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), - GPMC_REVISION_MINOR(l)); - - gpmc_mem_init(); - - if (gpmc_setup_irq() < 0) - dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); - - if (!pdev->dev.of_node) { - gpmc_cs_num = GPMC_CS_NUM; - gpmc_nr_waitpins = GPMC_NR_WAITPINS; - } - - rc = gpmc_probe_dt(pdev); - if (rc < 0) { - pm_runtime_put_sync(&pdev->dev); - dev_err(gpmc_dev, "failed to probe DT parameters\n"); - return rc; - } - - return 0; -} - -static int gpmc_remove(struct platform_device *pdev) -{ - gpmc_free_irq(); - gpmc_mem_exit(); - pm_runtime_put_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); - gpmc_dev = NULL; - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int gpmc_suspend(struct device *dev) -{ - omap3_gpmc_save_context(); - pm_runtime_put_sync(dev); - return 0; -} - -static int gpmc_resume(struct device *dev) -{ - pm_runtime_get_sync(dev); - omap3_gpmc_restore_context(); - return 0; -} -#endif - -static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume); - -static struct platform_driver gpmc_driver = { - .probe = gpmc_probe, - .remove = gpmc_remove, - .driver = { - .name = DEVICE_NAME, - .owner = THIS_MODULE, - .of_match_table = of_match_ptr(gpmc_dt_ids), - .pm = &gpmc_pm_ops, - }, -}; - -static __init int gpmc_init(void) -{ - return platform_driver_register(&gpmc_driver); -} - -static __exit void gpmc_exit(void) -{ - platform_driver_unregister(&gpmc_driver); - -} - -postcore_initcall(gpmc_init); -module_exit(gpmc_exit); - -static irqreturn_t gpmc_handle_irq(int irq, void *dev) -{ - int i; - u32 regval; - - regval = gpmc_read_reg(GPMC_IRQSTATUS); - - if (!regval) - return IRQ_NONE; - - for (i = 0; i < GPMC_NR_IRQ; i++) - if (regval & gpmc_client_irq[i].bitmask) - generic_handle_irq(gpmc_client_irq[i].irq); - - gpmc_write_reg(GPMC_IRQSTATUS, regval); - - return IRQ_HANDLED; -} - -static struct omap3_gpmc_regs gpmc_context; - -void omap3_gpmc_save_context(void) -{ - int i; - - gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); - gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); - gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); - gpmc_context.config = gpmc_read_reg(GPMC_CONFIG); - gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1); - gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2); - gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL); - for (i = 0; i < gpmc_cs_num; i++) { - gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i); - if (gpmc_context.cs_context[i].is_valid) { - gpmc_context.cs_context[i].config1 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG1); - gpmc_context.cs_context[i].config2 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG2); - gpmc_context.cs_context[i].config3 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG3); - gpmc_context.cs_context[i].config4 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG4); - gpmc_context.cs_context[i].config5 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG5); - gpmc_context.cs_context[i].config6 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG6); - gpmc_context.cs_context[i].config7 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG7); - } - } -} - -void omap3_gpmc_restore_context(void) -{ - int i; - - gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); - gpmc_write_reg(GPMC_IRQENABLE, 
gpmc_context.irqenable); - gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); - gpmc_write_reg(GPMC_CONFIG, gpmc_context.config); - gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1); - gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2); - gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control); - for (i = 0; i < gpmc_cs_num; i++) { - if (gpmc_context.cs_context[i].is_valid) { - gpmc_cs_write_reg(i, GPMC_CS_CONFIG1, - gpmc_context.cs_context[i].config1); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG2, - gpmc_context.cs_context[i].config2); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG3, - gpmc_context.cs_context[i].config3); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG4, - gpmc_context.cs_context[i].config4); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG5, - gpmc_context.cs_context[i].config5); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG6, - gpmc_context.cs_context[i].config6); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, - gpmc_context.cs_context[i].config7); - } - } -} diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 6d91c27fd4c8..6759de7ce209 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -41,6 +41,14 @@ config TI_EMIF parameters and other settings during frequency, voltage and temperature changes +config OMAP_GPMC + bool + help + This driver is for the General Purpose Memory Controller (GPMC) + present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows + interfacing to a variety of asynchronous as well as synchronous + memory drives like NOR, NAND, OneNAND, SRAM. + config MVEBU_DEVBUS bool "Marvell EBU Device Bus Controller" default y diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index c32d31981be3..a7d410f3ca32 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -8,6 +8,7 @@ endif obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o obj-$(CONFIG_TI_AEMIF) += ti-aemif.o obj-$(CONFIG_TI_EMIF) += emif.o +obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o obj-$(CONFIG_FSL_IFC) += fsl_ifc.o obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c new file mode 100644 index 000000000000..abfc0ddd076e --- /dev/null +++ b/drivers/memory/omap-gpmc.c @@ -0,0 +1,2094 @@ +/* + * GPMC support functions + * + * Copyright (C) 2005-2006 Nokia Corporation + * + * Author: Juha Yrjola + * + * Copyright (C) 2009 Texas Instruments + * Added OMAP4 support - Santosh Shilimkar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define DEVICE_NAME "omap-gpmc" + +/* GPMC register offsets */ +#define GPMC_REVISION 0x00 +#define GPMC_SYSCONFIG 0x10 +#define GPMC_SYSSTATUS 0x14 +#define GPMC_IRQSTATUS 0x18 +#define GPMC_IRQENABLE 0x1c +#define GPMC_TIMEOUT_CONTROL 0x40 +#define GPMC_ERR_ADDRESS 0x44 +#define GPMC_ERR_TYPE 0x48 +#define GPMC_CONFIG 0x50 +#define GPMC_STATUS 0x54 +#define GPMC_PREFETCH_CONFIG1 0x1e0 +#define GPMC_PREFETCH_CONFIG2 0x1e4 +#define GPMC_PREFETCH_CONTROL 0x1ec +#define GPMC_PREFETCH_STATUS 0x1f0 +#define GPMC_ECC_CONFIG 0x1f4 +#define GPMC_ECC_CONTROL 0x1f8 +#define GPMC_ECC_SIZE_CONFIG 0x1fc +#define GPMC_ECC1_RESULT 0x200 +#define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */ +#define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */ +#define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */ +#define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */ +#define GPMC_ECC_BCH_RESULT_4 0x300 /* not available on OMAP2 */ +#define GPMC_ECC_BCH_RESULT_5 0x304 /* not available on OMAP2 */ +#define GPMC_ECC_BCH_RESULT_6 0x308 /* not available on OMAP2 */ + +/* GPMC ECC control settings */ +#define GPMC_ECC_CTRL_ECCCLEAR 0x100 +#define GPMC_ECC_CTRL_ECCDISABLE 0x000 +#define GPMC_ECC_CTRL_ECCREG1 0x001 +#define GPMC_ECC_CTRL_ECCREG2 0x002 +#define GPMC_ECC_CTRL_ECCREG3 0x003 +#define GPMC_ECC_CTRL_ECCREG4 0x004 +#define GPMC_ECC_CTRL_ECCREG5 0x005 +#define GPMC_ECC_CTRL_ECCREG6 0x006 +#define GPMC_ECC_CTRL_ECCREG7 0x007 +#define GPMC_ECC_CTRL_ECCREG8 0x008 +#define GPMC_ECC_CTRL_ECCREG9 0x009 + +#define GPMC_CONFIG_LIMITEDADDRESS BIT(1) + +#define GPMC_CONFIG2_CSEXTRADELAY BIT(7) +#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7) +#define GPMC_CONFIG4_OEEXTRADELAY BIT(7) +#define GPMC_CONFIG4_WEEXTRADELAY BIT(23) +#define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6) +#define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7) + +#define GPMC_CS0_OFFSET 0x60 +#define GPMC_CS_SIZE 0x30 +#define GPMC_BCH_SIZE 0x10 + +#define GPMC_MEM_END 0x3FFFFFFF + +#define GPMC_CHUNK_SHIFT 24 /* 16 MB */ +#define GPMC_SECTION_SHIFT 28 /* 128 MB */ + +#define CS_NUM_SHIFT 24 +#define ENABLE_PREFETCH (0x1 << 7) +#define DMA_MPU_MODE 2 + +#define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf) +#define GPMC_REVISION_MINOR(l) (l & 0xf) + +#define GPMC_HAS_WR_ACCESS 0x1 +#define GPMC_HAS_WR_DATA_MUX_BUS 0x2 +#define GPMC_HAS_MUX_AAD 0x4 + +#define GPMC_NR_WAITPINS 4 + +#define GPMC_CS_CONFIG1 0x00 +#define GPMC_CS_CONFIG2 0x04 +#define GPMC_CS_CONFIG3 0x08 +#define GPMC_CS_CONFIG4 0x0c +#define GPMC_CS_CONFIG5 0x10 +#define GPMC_CS_CONFIG6 0x14 +#define GPMC_CS_CONFIG7 0x18 +#define GPMC_CS_NAND_COMMAND 0x1c +#define GPMC_CS_NAND_ADDRESS 0x20 +#define GPMC_CS_NAND_DATA 0x24 + +/* Control Commands */ +#define GPMC_CONFIG_RDY_BSY 0x00000001 +#define GPMC_CONFIG_DEV_SIZE 0x00000002 +#define GPMC_CONFIG_DEV_TYPE 0x00000003 +#define GPMC_SET_IRQ_STATUS 0x00000004 + +#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31) +#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30) +#define GPMC_CONFIG1_READTYPE_ASYNC (0 << 29) +#define GPMC_CONFIG1_READTYPE_SYNC (1 << 29) +#define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28) +#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27) +#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27) +#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25) +#define GPMC_CONFIG1_PAGE_LEN(val) 
((val & 3) << 23) +#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22) +#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21) +#define GPMC_CONFIG1_WAIT_MON_IIME(val) ((val & 3) << 18) +#define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16) +#define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12) +#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1) +#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10) +#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0) +#define GPMC_CONFIG1_MUXTYPE(val) ((val & 3) << 8) +#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4) +#define GPMC_CONFIG1_FCLK_DIV(val) (val & 3) +#define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1)) +#define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2)) +#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3)) +#define GPMC_CONFIG7_CSVALID (1 << 6) + +#define GPMC_DEVICETYPE_NOR 0 +#define GPMC_DEVICETYPE_NAND 2 +#define GPMC_CONFIG_WRITEPROTECT 0x00000010 +#define WR_RD_PIN_MONITORING 0x00600000 + +#define GPMC_ENABLE_IRQ 0x0000000d + +/* ECC commands */ +#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */ +#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */ +#define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */ + +/* XXX: Only NAND irq has been considered,currently these are the only ones used + */ +#define GPMC_NR_IRQ 2 + +struct gpmc_cs_data { + const char *name; + +#define GPMC_CS_RESERVED (1 << 0) + u32 flags; + + struct resource mem; +}; + +struct gpmc_client_irq { + unsigned irq; + u32 bitmask; +}; + +/* Structure to save gpmc cs context */ +struct gpmc_cs_config { + u32 config1; + u32 config2; + u32 config3; + u32 config4; + u32 config5; + u32 config6; + u32 config7; + int is_valid; +}; + +/* + * Structure to save/restore gpmc context + * to support core off on OMAP3 + */ +struct omap3_gpmc_regs { + u32 sysconfig; + u32 irqenable; + u32 timeout_ctrl; + u32 config; + u32 prefetch_config1; + u32 prefetch_config2; + u32 prefetch_control; + struct gpmc_cs_config cs_context[GPMC_CS_NUM]; +}; + +static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; +static struct irq_chip gpmc_irq_chip; +static int gpmc_irq_start; + +static struct resource gpmc_mem_root; +static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM]; +static DEFINE_SPINLOCK(gpmc_mem_lock); +/* Define chip-selects as reserved by default until probe completes */ +static unsigned int gpmc_cs_num = GPMC_CS_NUM; +static unsigned int gpmc_nr_waitpins; +static struct device *gpmc_dev; +static int gpmc_irq; +static resource_size_t phys_base, mem_size; +static unsigned gpmc_capability; +static void __iomem *gpmc_base; + +static struct clk *gpmc_l3_clk; + +static irqreturn_t gpmc_handle_irq(int irq, void *dev); + +static void gpmc_write_reg(int idx, u32 val) +{ + writel_relaxed(val, gpmc_base + idx); +} + +static u32 gpmc_read_reg(int idx) +{ + return readl_relaxed(gpmc_base + idx); +} + +void gpmc_cs_write_reg(int cs, int idx, u32 val) +{ + void __iomem *reg_addr; + + reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx; + writel_relaxed(val, reg_addr); +} + +static u32 gpmc_cs_read_reg(int cs, int idx) +{ + void __iomem *reg_addr; + + reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx; + return readl_relaxed(reg_addr); +} + +/* TODO: Add support for gpmc_fck to clock framework and use it */ +static unsigned long gpmc_get_fclk_period(void) +{ + unsigned long rate = clk_get_rate(gpmc_l3_clk); + + rate /= 1000; + rate = 1000000000 / rate; /* In picoseconds */ + + return rate; +} + +static unsigned int 
gpmc_ns_to_ticks(unsigned int time_ns) +{ + unsigned long tick_ps; + + /* Calculate in picosecs to yield more exact results */ + tick_ps = gpmc_get_fclk_period(); + + return (time_ns * 1000 + tick_ps - 1) / tick_ps; +} + +static unsigned int gpmc_ps_to_ticks(unsigned int time_ps) +{ + unsigned long tick_ps; + + /* Calculate in picosecs to yield more exact results */ + tick_ps = gpmc_get_fclk_period(); + + return (time_ps + tick_ps - 1) / tick_ps; +} + +unsigned int gpmc_ticks_to_ns(unsigned int ticks) +{ + return ticks * gpmc_get_fclk_period() / 1000; +} + +static unsigned int gpmc_ticks_to_ps(unsigned int ticks) +{ + return ticks * gpmc_get_fclk_period(); +} + +static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps) +{ + unsigned long ticks = gpmc_ps_to_ticks(time_ps); + + return ticks * gpmc_get_fclk_period(); +} + +static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value) +{ + u32 l; + + l = gpmc_cs_read_reg(cs, reg); + if (value) + l |= mask; + else + l &= ~mask; + gpmc_cs_write_reg(cs, reg, l); +} + +static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p) +{ + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1, + GPMC_CONFIG1_TIME_PARA_GRAN, + p->time_para_granularity); + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2, + GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay); + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3, + GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay); + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, + GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay); + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, + GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay); + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, + GPMC_CONFIG6_CYCLE2CYCLESAMECSEN, + p->cycle2cyclesamecsen); + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, + GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN, + p->cycle2cyclediffcsen); +} + +#ifdef DEBUG +static int get_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, + bool raw, bool noval, int shift, + const char *name) +{ + u32 l; + int nr_bits, max_value, mask; + + l = gpmc_cs_read_reg(cs, reg); + nr_bits = end_bit - st_bit + 1; + max_value = (1 << nr_bits) - 1; + mask = max_value << st_bit; + l = (l & mask) >> st_bit; + if (shift) + l = (shift << l); + if (noval && (l == 0)) + return 0; + if (!raw) { + unsigned int time_ns_min, time_ns, time_ns_max; + + time_ns_min = gpmc_ticks_to_ns(l ? l - 1 : 0); + time_ns = gpmc_ticks_to_ns(l); + time_ns_max = gpmc_ticks_to_ns(l + 1 > max_value ? 
+ max_value : l + 1); + pr_info("gpmc,%s = <%u> (%u - %u ns, %i ticks)\n", + name, time_ns, time_ns_min, time_ns_max, l); + } else { + pr_info("gpmc,%s = <%u>\n", name, l); + } + + return l; +} + +#define GPMC_PRINT_CONFIG(cs, config) \ + pr_info("cs%i %s: 0x%08x\n", cs, #config, \ + gpmc_cs_read_reg(cs, config)) +#define GPMC_GET_RAW(reg, st, end, field) \ + get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 0, 0, field) +#define GPMC_GET_RAW_BOOL(reg, st, end, field) \ + get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, 0, field) +#define GPMC_GET_RAW_SHIFT(reg, st, end, shift, field) \ + get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, (shift), field) +#define GPMC_GET_TICKS(reg, st, end, field) \ + get_gpmc_timing_reg(cs, (reg), (st), (end), 0, 0, 0, field) + +static void gpmc_show_regs(int cs, const char *desc) +{ + pr_info("gpmc cs%i %s:\n", cs, desc); + GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1); + GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2); + GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3); + GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4); + GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5); + GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6); +} + +/* + * Note that gpmc,wait-pin handing wrongly assumes bit 8 is available, + * see commit c9fb809. + */ +static void gpmc_cs_show_timings(int cs, const char *desc) +{ + gpmc_show_regs(cs, desc); + + pr_info("gpmc cs%i access configuration:\n", cs); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity"); + GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data"); + GPMC_GET_RAW(GPMC_CS_CONFIG1, 12, 13, "device-width"); + GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read"); + GPMC_GET_RAW_SHIFT(GPMC_CS_CONFIG1, 23, 24, 4, "burst-length"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "gpmc,sync-read"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 30, 30, "burst-read"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 31, 31, "burst-wrap"); + + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG2, 7, 7, "cs-extra-delay"); + + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG3, 7, 7, "adv-extra-delay"); + + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 23, 23, "we-extra-delay"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 7, 7, "oe-extra-delay"); + + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 7, 7, "cycle2cycle-samecsen"); + GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 6, 6, "cycle2cycle-diffcsen"); + + pr_info("gpmc cs%i timings configuration:\n", cs); + GPMC_GET_TICKS(GPMC_CS_CONFIG2, 0, 3, "cs-on-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG2, 8, 12, "cs-rd-off-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG2, 16, 20, "cs-wr-off-ns"); + + GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns"); + + GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns"); + + GPMC_GET_TICKS(GPMC_CS_CONFIG5, 0, 4, "rd-cycle-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG5, 8, 12, "wr-cycle-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG5, 16, 20, "access-ns"); + + GPMC_GET_TICKS(GPMC_CS_CONFIG5, 24, 27, "page-burst-access-ns"); + + GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns"); + + GPMC_GET_TICKS(GPMC_CS_CONFIG1, 18, 19, 
"wait-monitoring-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG1, 25, 26, "clk-activation-ns"); + + GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns"); + GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns"); +} +#else +static inline void gpmc_cs_show_timings(int cs, const char *desc) +{ +} +#endif + +static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, + int time, const char *name) +{ + u32 l; + int ticks, mask, nr_bits; + + if (time == 0) + ticks = 0; + else + ticks = gpmc_ns_to_ticks(time); + nr_bits = end_bit - st_bit + 1; + mask = (1 << nr_bits) - 1; + + if (ticks > mask) { + pr_err("%s: GPMC error! CS%d: %s: %d ns, %d ticks > %d\n", + __func__, cs, name, time, ticks, mask); + + return -1; + } + + l = gpmc_cs_read_reg(cs, reg); +#ifdef DEBUG + printk(KERN_INFO + "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n", + cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000, + (l >> st_bit) & mask, time); +#endif + l &= ~(mask << st_bit); + l |= ticks << st_bit; + gpmc_cs_write_reg(cs, reg, l); + + return 0; +} + +#define GPMC_SET_ONE(reg, st, end, field) \ + if (set_gpmc_timing_reg(cs, (reg), (st), (end), \ + t->field, #field) < 0) \ + return -1 + +int gpmc_calc_divider(unsigned int sync_clk) +{ + int div; + u32 l; + + l = sync_clk + (gpmc_get_fclk_period() - 1); + div = l / gpmc_get_fclk_period(); + if (div > 4) + return -1; + if (div <= 0) + div = 1; + + return div; +} + +int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t) +{ + int div; + u32 l; + + gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings"); + div = gpmc_calc_divider(t->sync_clk); + if (div < 0) + return div; + + GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on); + GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off); + GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off); + + GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on); + GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off); + GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off); + + GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on); + GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off); + GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on); + GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off); + + GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle); + GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle); + GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access); + + GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access); + + GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround); + GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay); + + GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring); + GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation); + + if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) + GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus); + if (gpmc_capability & GPMC_HAS_WR_ACCESS) + GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access); + + /* caller is expected to have initialized CONFIG1 to cover + * at least sync vs async + */ + l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); + if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) { +#ifdef DEBUG + printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n", + cs, (div * gpmc_get_fclk_period()) / 1000, div); +#endif + l &= ~0x03; + l |= (div - 1); + gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l); + } + + gpmc_cs_bool_timings(cs, &t->bool_timings); + gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings"); + + return 0; +} + +static int gpmc_cs_set_memconf(int cs, u32 base, u32 size) +{ + u32 l; + u32 mask; + + /* + * Ensure that base address is aligned on a + * boundary equal to or greater than size. 
+ */ + if (base & (size - 1)) + return -EINVAL; + + mask = (1 << GPMC_SECTION_SHIFT) - size; + l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); + l &= ~0x3f; + l = (base >> GPMC_CHUNK_SHIFT) & 0x3f; + l &= ~(0x0f << 8); + l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8; + l |= GPMC_CONFIG7_CSVALID; + gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); + + return 0; +} + +static void gpmc_cs_enable_mem(int cs) +{ + u32 l; + + l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); + l |= GPMC_CONFIG7_CSVALID; + gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); +} + +static void gpmc_cs_disable_mem(int cs) +{ + u32 l; + + l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); + l &= ~GPMC_CONFIG7_CSVALID; + gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); +} + +static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size) +{ + u32 l; + u32 mask; + + l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); + *base = (l & 0x3f) << GPMC_CHUNK_SHIFT; + mask = (l >> 8) & 0x0f; + *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT); +} + +static int gpmc_cs_mem_enabled(int cs) +{ + u32 l; + + l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); + return l & GPMC_CONFIG7_CSVALID; +} + +static void gpmc_cs_set_reserved(int cs, int reserved) +{ + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + + gpmc->flags |= GPMC_CS_RESERVED; +} + +static bool gpmc_cs_reserved(int cs) +{ + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + + return gpmc->flags & GPMC_CS_RESERVED; +} + +static void gpmc_cs_set_name(int cs, const char *name) +{ + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + + gpmc->name = name; +} + +const char *gpmc_cs_get_name(int cs) +{ + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + + return gpmc->name; +} + +static unsigned long gpmc_mem_align(unsigned long size) +{ + int order; + + size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1); + order = GPMC_CHUNK_SHIFT - 1; + do { + size >>= 1; + order++; + } while (size); + size = 1 << order; + return size; +} + +static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size) +{ + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + struct resource *res = &gpmc->mem; + int r; + + size = gpmc_mem_align(size); + spin_lock(&gpmc_mem_lock); + res->start = base; + res->end = base + size - 1; + r = request_resource(&gpmc_mem_root, res); + spin_unlock(&gpmc_mem_lock); + + return r; +} + +static int gpmc_cs_delete_mem(int cs) +{ + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + struct resource *res = &gpmc->mem; + int r; + + spin_lock(&gpmc_mem_lock); + r = release_resource(res); + res->start = 0; + res->end = 0; + spin_unlock(&gpmc_mem_lock); + + return r; +} + +/** + * gpmc_cs_remap - remaps a chip-select physical base address + * @cs: chip-select to remap + * @base: physical base address to re-map chip-select to + * + * Re-maps a chip-select to a new physical base address specified by + * "base". Returns 0 on success and appropriate negative error code + * on failure. + */ +static int gpmc_cs_remap(int cs, u32 base) +{ + int ret; + u32 old_base, size; + + if (cs > gpmc_cs_num) { + pr_err("%s: requested chip-select is disabled\n", __func__); + return -ENODEV; + } + + /* + * Make sure we ignore any device offsets from the GPMC partition + * allocated for the chip select and that the new base confirms + * to the GPMC 16MB minimum granularity. 
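For instance (address assumed for illustration), a mapping requested 4 KB into its window is pulled back to the 16 MB boundary by the masking below:

	u32 base = 0x08001000;		/* assumed: 4 KB into the window */

	base &= ~(SZ_16M - 1);		/* -> 0x08000000, chunk-aligned base */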
+ */ + base &= ~(SZ_16M - 1); + + gpmc_cs_get_memconf(cs, &old_base, &size); + if (base == old_base) + return 0; + + ret = gpmc_cs_delete_mem(cs); + if (ret < 0) + return ret; + + ret = gpmc_cs_insert_mem(cs, base, size); + if (ret < 0) + return ret; + + ret = gpmc_cs_set_memconf(cs, base, size); + + return ret; +} + +int gpmc_cs_request(int cs, unsigned long size, unsigned long *base) +{ + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + struct resource *res = &gpmc->mem; + int r = -1; + + if (cs > gpmc_cs_num) { + pr_err("%s: requested chip-select is disabled\n", __func__); + return -ENODEV; + } + size = gpmc_mem_align(size); + if (size > (1 << GPMC_SECTION_SHIFT)) + return -ENOMEM; + + spin_lock(&gpmc_mem_lock); + if (gpmc_cs_reserved(cs)) { + r = -EBUSY; + goto out; + } + if (gpmc_cs_mem_enabled(cs)) + r = adjust_resource(res, res->start & ~(size - 1), size); + if (r < 0) + r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0, + size, NULL, NULL); + if (r < 0) + goto out; + + /* Disable CS while changing base address and size mask */ + gpmc_cs_disable_mem(cs); + + r = gpmc_cs_set_memconf(cs, res->start, resource_size(res)); + if (r < 0) { + release_resource(res); + goto out; + } + + /* Enable CS */ + gpmc_cs_enable_mem(cs); + *base = res->start; + gpmc_cs_set_reserved(cs, 1); +out: + spin_unlock(&gpmc_mem_lock); + return r; +} +EXPORT_SYMBOL(gpmc_cs_request); + +void gpmc_cs_free(int cs) +{ + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + struct resource *res = &gpmc->mem; + + spin_lock(&gpmc_mem_lock); + if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) { + printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs); + BUG(); + spin_unlock(&gpmc_mem_lock); + return; + } + gpmc_cs_disable_mem(cs); + if (res->flags) + release_resource(res); + gpmc_cs_set_reserved(cs, 0); + spin_unlock(&gpmc_mem_lock); +} +EXPORT_SYMBOL(gpmc_cs_free); + +/** + * gpmc_configure - write request to configure gpmc + * @cmd: command type + * @wval: value to write + * @return status of the operation + */ +int gpmc_configure(int cmd, int wval) +{ + u32 regval; + + switch (cmd) { + case GPMC_ENABLE_IRQ: + gpmc_write_reg(GPMC_IRQENABLE, wval); + break; + + case GPMC_SET_IRQ_STATUS: + gpmc_write_reg(GPMC_IRQSTATUS, wval); + break; + + case GPMC_CONFIG_WP: + regval = gpmc_read_reg(GPMC_CONFIG); + if (wval) + regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */ + else + regval |= GPMC_CONFIG_WRITEPROTECT; /* WP is OFF */ + gpmc_write_reg(GPMC_CONFIG, regval); + break; + + default: + pr_err("%s: command not supported\n", __func__); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(gpmc_configure); + +void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) +{ + int i; + + reg->gpmc_status = gpmc_base + GPMC_STATUS; + reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET + + GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs; + reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET + + GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs; + reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET + + GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs; + reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1; + reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2; + reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL; + reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS; + reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG; + reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL; + reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG; + reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT; + + for (i = 0; 
i < GPMC_BCH_NUM_REMAINDER; i++) { + reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 + + GPMC_BCH_SIZE * i; + reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 + + GPMC_BCH_SIZE * i; + reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 + + GPMC_BCH_SIZE * i; + reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 + + GPMC_BCH_SIZE * i; + reg->gpmc_bch_result4[i] = gpmc_base + GPMC_ECC_BCH_RESULT_4 + + i * GPMC_BCH_SIZE; + reg->gpmc_bch_result5[i] = gpmc_base + GPMC_ECC_BCH_RESULT_5 + + i * GPMC_BCH_SIZE; + reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 + + i * GPMC_BCH_SIZE; + } +} + +int gpmc_get_client_irq(unsigned irq_config) +{ + int i; + + if (hweight32(irq_config) > 1) + return 0; + + for (i = 0; i < GPMC_NR_IRQ; i++) + if (gpmc_client_irq[i].bitmask & irq_config) + return gpmc_client_irq[i].irq; + + return 0; +} + +static int gpmc_irq_endis(unsigned irq, bool endis) +{ + int i; + u32 regval; + + for (i = 0; i < GPMC_NR_IRQ; i++) + if (irq == gpmc_client_irq[i].irq) { + regval = gpmc_read_reg(GPMC_IRQENABLE); + if (endis) + regval |= gpmc_client_irq[i].bitmask; + else + regval &= ~gpmc_client_irq[i].bitmask; + gpmc_write_reg(GPMC_IRQENABLE, regval); + break; + } + + return 0; +} + +static void gpmc_irq_disable(struct irq_data *p) +{ + gpmc_irq_endis(p->irq, false); +} + +static void gpmc_irq_enable(struct irq_data *p) +{ + gpmc_irq_endis(p->irq, true); +} + +static void gpmc_irq_noop(struct irq_data *data) { } + +static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } + +static int gpmc_setup_irq(void) +{ + int i; + u32 regval; + + if (!gpmc_irq) + return -EINVAL; + + gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); + if (gpmc_irq_start < 0) { + pr_err("irq_alloc_descs failed\n"); + return gpmc_irq_start; + } + + gpmc_irq_chip.name = "gpmc"; + gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; + gpmc_irq_chip.irq_enable = gpmc_irq_enable; + gpmc_irq_chip.irq_disable = gpmc_irq_disable; + gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; + gpmc_irq_chip.irq_ack = gpmc_irq_noop; + gpmc_irq_chip.irq_mask = gpmc_irq_noop; + gpmc_irq_chip.irq_unmask = gpmc_irq_noop; + + gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; + gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; + + for (i = 0; i < GPMC_NR_IRQ; i++) { + gpmc_client_irq[i].irq = gpmc_irq_start + i; + irq_set_chip_and_handler(gpmc_client_irq[i].irq, + &gpmc_irq_chip, handle_simple_irq); + set_irq_flags(gpmc_client_irq[i].irq, + IRQF_VALID | IRQF_NOAUTOEN); + } + + /* Disable interrupts */ + gpmc_write_reg(GPMC_IRQENABLE, 0); + + /* clear interrupts */ + regval = gpmc_read_reg(GPMC_IRQSTATUS); + gpmc_write_reg(GPMC_IRQSTATUS, regval); + + return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL); +} + +static int gpmc_free_irq(void) +{ + int i; + + if (gpmc_irq) + free_irq(gpmc_irq, NULL); + + for (i = 0; i < GPMC_NR_IRQ; i++) { + irq_set_handler(gpmc_client_irq[i].irq, NULL); + irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip); + irq_modify_status(gpmc_client_irq[i].irq, 0, 0); + } + + irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ); + + return 0; +} + +static void gpmc_mem_exit(void) +{ + int cs; + + for (cs = 0; cs < gpmc_cs_num; cs++) { + if (!gpmc_cs_mem_enabled(cs)) + continue; + gpmc_cs_delete_mem(cs); + } + +} + +static void gpmc_mem_init(void) +{ + int cs; + + /* + * The first 1MB of GPMC address space is typically mapped to + * the internal ROM. Never allocate the first page, to + * facilitate bug detection; even if we didn't boot from ROM. 
+ */ + gpmc_mem_root.start = SZ_1M; + gpmc_mem_root.end = GPMC_MEM_END; + + /* Reserve all regions that has been set up by bootloader */ + for (cs = 0; cs < gpmc_cs_num; cs++) { + u32 base, size; + + if (!gpmc_cs_mem_enabled(cs)) + continue; + gpmc_cs_get_memconf(cs, &base, &size); + if (gpmc_cs_insert_mem(cs, base, size)) { + pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n", + __func__, cs, base, base + size); + gpmc_cs_disable_mem(cs); + } + } +} + +static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk) +{ + u32 temp; + int div; + + div = gpmc_calc_divider(sync_clk); + temp = gpmc_ps_to_ticks(time_ps); + temp = (temp + div - 1) / div; + return gpmc_ticks_to_ps(temp * div); +} + +/* XXX: can the cycles be avoided ? */ +static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t, + struct gpmc_device_timings *dev_t, + bool mux) +{ + u32 temp; + + /* adv_rd_off */ + temp = dev_t->t_avdp_r; + /* XXX: mux check required ? */ + if (mux) { + /* XXX: t_avdp not to be required for sync, only added for tusb + * this indirectly necessitates requirement of t_avdp_r and + * t_avdp_w instead of having a single t_avdp + */ + temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh); + temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); + } + gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp); + + /* oe_on */ + temp = dev_t->t_oeasu; /* XXX: remove this ? */ + if (mux) { + temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach); + temp = max_t(u32, temp, gpmc_t->adv_rd_off + + gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe)); + } + gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp); + + /* access */ + /* XXX: any scope for improvement ?, by combining oe_on + * and clk_activation, need to check whether + * access = clk_activation + round to sync clk ? + */ + temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk); + temp += gpmc_t->clk_activation; + if (dev_t->cyc_oe) + temp = max_t(u32, temp, gpmc_t->oe_on + + gpmc_ticks_to_ps(dev_t->cyc_oe)); + gpmc_t->access = gpmc_round_ps_to_ticks(temp); + + gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1); + gpmc_t->cs_rd_off = gpmc_t->oe_off; + + /* rd_cycle */ + temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez); + temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) + + gpmc_t->access; + /* XXX: barter t_ce_rdyz with t_cez_r ? 
*/ + if (dev_t->t_ce_rdyz) + temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz); + gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp); + + return 0; +} + +static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t, + struct gpmc_device_timings *dev_t, + bool mux) +{ + u32 temp; + + /* adv_wr_off */ + temp = dev_t->t_avdp_w; + if (mux) { + temp = max_t(u32, temp, + gpmc_t->clk_activation + dev_t->t_avdh); + temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); + } + gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp); + + /* wr_data_mux_bus */ + temp = max_t(u32, dev_t->t_weasu, + gpmc_t->clk_activation + dev_t->t_rdyo); + /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?, + * and in that case remember to handle we_on properly + */ + if (mux) { + temp = max_t(u32, temp, + gpmc_t->adv_wr_off + dev_t->t_aavdh); + temp = max_t(u32, temp, gpmc_t->adv_wr_off + + gpmc_ticks_to_ps(dev_t->cyc_aavdh_we)); + } + gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp); + + /* we_on */ + if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) + gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu); + else + gpmc_t->we_on = gpmc_t->wr_data_mux_bus; + + /* wr_access */ + /* XXX: gpmc_capability check reqd ? , even if not, will not harm */ + gpmc_t->wr_access = gpmc_t->access; + + /* we_off */ + temp = gpmc_t->we_on + dev_t->t_wpl; + temp = max_t(u32, temp, + gpmc_t->wr_access + gpmc_ticks_to_ps(1)); + temp = max_t(u32, temp, + gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl)); + gpmc_t->we_off = gpmc_round_ps_to_ticks(temp); + + gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off + + dev_t->t_wph); + + /* wr_cycle */ + temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk); + temp += gpmc_t->wr_access; + /* XXX: barter t_ce_rdyz with t_cez_w ? */ + if (dev_t->t_ce_rdyz) + temp = max_t(u32, temp, + gpmc_t->cs_wr_off + dev_t->t_ce_rdyz); + gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp); + + return 0; +} + +static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t, + struct gpmc_device_timings *dev_t, + bool mux) +{ + u32 temp; + + /* adv_rd_off */ + temp = dev_t->t_avdp_r; + if (mux) + temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); + gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp); + + /* oe_on */ + temp = dev_t->t_oeasu; + if (mux) + temp = max_t(u32, temp, + gpmc_t->adv_rd_off + dev_t->t_aavdh); + gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp); + + /* access */ + temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? 
*/ + gpmc_t->oe_on + dev_t->t_oe); + temp = max_t(u32, temp, + gpmc_t->cs_on + dev_t->t_ce); + temp = max_t(u32, temp, + gpmc_t->adv_on + dev_t->t_aa); + gpmc_t->access = gpmc_round_ps_to_ticks(temp); + + gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1); + gpmc_t->cs_rd_off = gpmc_t->oe_off; + + /* rd_cycle */ + temp = max_t(u32, dev_t->t_rd_cycle, + gpmc_t->cs_rd_off + dev_t->t_cez_r); + temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez); + gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp); + + return 0; +} + +static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t, + struct gpmc_device_timings *dev_t, + bool mux) +{ + u32 temp; + + /* adv_wr_off */ + temp = dev_t->t_avdp_w; + if (mux) + temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp); + gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp); + + /* wr_data_mux_bus */ + temp = dev_t->t_weasu; + if (mux) { + temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh); + temp = max_t(u32, temp, gpmc_t->adv_wr_off + + gpmc_ticks_to_ps(dev_t->cyc_aavdh_we)); + } + gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp); + + /* we_on */ + if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) + gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu); + else + gpmc_t->we_on = gpmc_t->wr_data_mux_bus; + + /* we_off */ + temp = gpmc_t->we_on + dev_t->t_wpl; + gpmc_t->we_off = gpmc_round_ps_to_ticks(temp); + + gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off + + dev_t->t_wph); + + /* wr_cycle */ + temp = max_t(u32, dev_t->t_wr_cycle, + gpmc_t->cs_wr_off + dev_t->t_cez_w); + gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp); + + return 0; +} + +static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t, + struct gpmc_device_timings *dev_t) +{ + u32 temp; + + gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) * + gpmc_get_fclk_period(); + + gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk( + dev_t->t_bacc, + gpmc_t->sync_clk); + + temp = max_t(u32, dev_t->t_ces, dev_t->t_avds); + gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp); + + if (gpmc_calc_divider(gpmc_t->sync_clk) != 1) + return 0; + + if (dev_t->ce_xdelay) + gpmc_t->bool_timings.cs_extra_delay = true; + if (dev_t->avd_xdelay) + gpmc_t->bool_timings.adv_extra_delay = true; + if (dev_t->oe_xdelay) + gpmc_t->bool_timings.oe_extra_delay = true; + if (dev_t->we_xdelay) + gpmc_t->bool_timings.we_extra_delay = true; + + return 0; +} + +static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t, + struct gpmc_device_timings *dev_t, + bool sync) +{ + u32 temp; + + /* cs_on */ + gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu); + + /* adv_on */ + temp = dev_t->t_avdasu; + if (dev_t->t_ce_avd) + temp = max_t(u32, temp, + gpmc_t->cs_on + dev_t->t_ce_avd); + gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp); + + if (sync) + gpmc_calc_sync_common_timings(gpmc_t, dev_t); + + return 0; +} + +/* TODO: remove this function once all peripherals are confirmed to + * work with generic timing. 
Simultaneously gpmc_cs_set_timings() + * has to be modified to handle timings in ps instead of ns +*/ +static void gpmc_convert_ps_to_ns(struct gpmc_timings *t) +{ + t->cs_on /= 1000; + t->cs_rd_off /= 1000; + t->cs_wr_off /= 1000; + t->adv_on /= 1000; + t->adv_rd_off /= 1000; + t->adv_wr_off /= 1000; + t->we_on /= 1000; + t->we_off /= 1000; + t->oe_on /= 1000; + t->oe_off /= 1000; + t->page_burst_access /= 1000; + t->access /= 1000; + t->rd_cycle /= 1000; + t->wr_cycle /= 1000; + t->bus_turnaround /= 1000; + t->cycle2cycle_delay /= 1000; + t->wait_monitoring /= 1000; + t->clk_activation /= 1000; + t->wr_access /= 1000; + t->wr_data_mux_bus /= 1000; +} + +int gpmc_calc_timings(struct gpmc_timings *gpmc_t, + struct gpmc_settings *gpmc_s, + struct gpmc_device_timings *dev_t) +{ + bool mux = false, sync = false; + + if (gpmc_s) { + mux = gpmc_s->mux_add_data ? true : false; + sync = (gpmc_s->sync_read || gpmc_s->sync_write); + } + + memset(gpmc_t, 0, sizeof(*gpmc_t)); + + gpmc_calc_common_timings(gpmc_t, dev_t, sync); + + if (gpmc_s && gpmc_s->sync_read) + gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux); + else + gpmc_calc_async_read_timings(gpmc_t, dev_t, mux); + + if (gpmc_s && gpmc_s->sync_write) + gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux); + else + gpmc_calc_async_write_timings(gpmc_t, dev_t, mux); + + /* TODO: remove, see function definition */ + gpmc_convert_ps_to_ns(gpmc_t); + + return 0; +} + +/** + * gpmc_cs_program_settings - programs non-timing related settings + * @cs: GPMC chip-select to program + * @p: pointer to GPMC settings structure + * + * Programs non-timing related settings for a GPMC chip-select, such as + * bus-width, burst configuration, etc. Function should be called once + * for each chip-select that is being used and must be called before + * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1 + * register will be initialised to zero by this function. Returns 0 on + * success and appropriate negative error code on failure. 
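To make the required call order concrete, a minimal usage sketch follows; the helper name, chip-select number, window size and settings values are assumptions, and the timings are taken as already computed (for example via gpmc_calc_timings() from datasheet figures):

static int example_setup_async_nor(int cs, struct gpmc_timings *t)
{
	/* Sketch only: an asynchronous, non-multiplexed 16-bit device. */
	struct gpmc_settings s = {
		.device_width = GPMC_DEVWIDTH_16BIT,
	};
	unsigned long base;
	int ret;

	ret = gpmc_cs_request(cs, SZ_16M, &base);	/* reserve and map the CS window */
	if (ret < 0)
		return ret;

	ret = gpmc_cs_program_settings(cs, &s);		/* CONFIG1 written, timing bits zeroed */
	if (!ret)
		ret = gpmc_cs_set_timings(cs, t);	/* so timings must be set afterwards */
	if (ret)
		gpmc_cs_free(cs);

	return ret;
}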
+ */ +int gpmc_cs_program_settings(int cs, struct gpmc_settings *p) +{ + u32 config1; + + if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) { + pr_err("%s: invalid width %d!", __func__, p->device_width); + return -EINVAL; + } + + /* Address-data multiplexing not supported for NAND devices */ + if (p->device_nand && p->mux_add_data) { + pr_err("%s: invalid configuration!\n", __func__); + return -EINVAL; + } + + if ((p->mux_add_data > GPMC_MUX_AD) || + ((p->mux_add_data == GPMC_MUX_AAD) && + !(gpmc_capability & GPMC_HAS_MUX_AAD))) { + pr_err("%s: invalid multiplex configuration!\n", __func__); + return -EINVAL; + } + + /* Page/burst mode supports lengths of 4, 8 and 16 bytes */ + if (p->burst_read || p->burst_write) { + switch (p->burst_len) { + case GPMC_BURST_4: + case GPMC_BURST_8: + case GPMC_BURST_16: + break; + default: + pr_err("%s: invalid page/burst-length (%d)\n", + __func__, p->burst_len); + return -EINVAL; + } + } + + if (p->wait_pin > gpmc_nr_waitpins) { + pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin); + return -EINVAL; + } + + config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1)); + + if (p->sync_read) + config1 |= GPMC_CONFIG1_READTYPE_SYNC; + if (p->sync_write) + config1 |= GPMC_CONFIG1_WRITETYPE_SYNC; + if (p->wait_on_read) + config1 |= GPMC_CONFIG1_WAIT_READ_MON; + if (p->wait_on_write) + config1 |= GPMC_CONFIG1_WAIT_WRITE_MON; + if (p->wait_on_read || p->wait_on_write) + config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin); + if (p->device_nand) + config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND); + if (p->mux_add_data) + config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data); + if (p->burst_read) + config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP; + if (p->burst_write) + config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP; + if (p->burst_read || p->burst_write) { + config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3); + config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0; + } + + gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id gpmc_dt_ids[] = { + { .compatible = "ti,omap2420-gpmc" }, + { .compatible = "ti,omap2430-gpmc" }, + { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */ + { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */ + { .compatible = "ti,am3352-gpmc" }, /* am335x devices */ + { } +}; +MODULE_DEVICE_TABLE(of, gpmc_dt_ids); + +/** + * gpmc_read_settings_dt - read gpmc settings from device-tree + * @np: pointer to device-tree node for a gpmc child device + * @p: pointer to gpmc settings structure + * + * Reads the GPMC settings for a GPMC child device from device-tree and + * stores them in the GPMC settings structure passed. The GPMC settings + * structure is initialised to zero by this function and so any + * previously stored settings will be cleared. 
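Because the settings structure is zeroed by the call, caller-side defaults have to be applied after it returns rather than before. A short sketch (the fallback width is an assumption; "child" stands for the device-tree node being probed):

	struct gpmc_settings s;

	gpmc_read_settings_dt(child, &s);		/* clears anything stored earlier */

	if (!s.device_width)				/* property absent in the node */
		s.device_width = GPMC_DEVWIDTH_16BIT;	/* assumed fallback */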
+ */ +void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p) +{ + memset(p, 0, sizeof(struct gpmc_settings)); + + p->sync_read = of_property_read_bool(np, "gpmc,sync-read"); + p->sync_write = of_property_read_bool(np, "gpmc,sync-write"); + of_property_read_u32(np, "gpmc,device-width", &p->device_width); + of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data); + + if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) { + p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap"); + p->burst_read = of_property_read_bool(np, "gpmc,burst-read"); + p->burst_write = of_property_read_bool(np, "gpmc,burst-write"); + if (!p->burst_read && !p->burst_write) + pr_warn("%s: page/burst-length set but not used!\n", + __func__); + } + + if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) { + p->wait_on_read = of_property_read_bool(np, + "gpmc,wait-on-read"); + p->wait_on_write = of_property_read_bool(np, + "gpmc,wait-on-write"); + if (!p->wait_on_read && !p->wait_on_write) + pr_debug("%s: rd/wr wait monitoring not enabled!\n", + __func__); + } +} + +static void __maybe_unused gpmc_read_timings_dt(struct device_node *np, + struct gpmc_timings *gpmc_t) +{ + struct gpmc_bool_timings *p; + + if (!np || !gpmc_t) + return; + + memset(gpmc_t, 0, sizeof(*gpmc_t)); + + /* minimum clock period for syncronous mode */ + of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk); + + /* chip select timtings */ + of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on); + of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off); + of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off); + + /* ADV signal timings */ + of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on); + of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off); + of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off); + + /* WE signal timings */ + of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on); + of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off); + + /* OE signal timings */ + of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on); + of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off); + + /* access and cycle timings */ + of_property_read_u32(np, "gpmc,page-burst-access-ns", + &gpmc_t->page_burst_access); + of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access); + of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle); + of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle); + of_property_read_u32(np, "gpmc,bus-turnaround-ns", + &gpmc_t->bus_turnaround); + of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns", + &gpmc_t->cycle2cycle_delay); + of_property_read_u32(np, "gpmc,wait-monitoring-ns", + &gpmc_t->wait_monitoring); + of_property_read_u32(np, "gpmc,clk-activation-ns", + &gpmc_t->clk_activation); + + /* only applicable to OMAP3+ */ + of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access); + of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns", + &gpmc_t->wr_data_mux_bus); + + /* bool timing parameters */ + p = &gpmc_t->bool_timings; + + p->cycle2cyclediffcsen = + of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen"); + p->cycle2cyclesamecsen = + of_property_read_bool(np, "gpmc,cycle2cycle-samecsen"); + p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay"); + p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay"); + p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay"); + p->cs_extra_delay = of_property_read_bool(np, 
"gpmc,cs-extra-delay"); + p->time_para_granularity = + of_property_read_bool(np, "gpmc,time-para-granularity"); +} + +#if IS_ENABLED(CONFIG_MTD_NAND) + +static const char * const nand_xfer_types[] = { + [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", + [NAND_OMAP_POLLED] = "polled", + [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma", + [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq", +}; + +static int gpmc_probe_nand_child(struct platform_device *pdev, + struct device_node *child) +{ + u32 val; + const char *s; + struct gpmc_timings gpmc_t; + struct omap_nand_platform_data *gpmc_nand_data; + + if (of_property_read_u32(child, "reg", &val) < 0) { + dev_err(&pdev->dev, "%s has no 'reg' property\n", + child->full_name); + return -ENODEV; + } + + gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data), + GFP_KERNEL); + if (!gpmc_nand_data) + return -ENOMEM; + + gpmc_nand_data->cs = val; + gpmc_nand_data->of_node = child; + + /* Detect availability of ELM module */ + gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0); + if (gpmc_nand_data->elm_of_node == NULL) + gpmc_nand_data->elm_of_node = + of_parse_phandle(child, "elm_id", 0); + if (gpmc_nand_data->elm_of_node == NULL) + pr_warn("%s: ti,elm-id property not found\n", __func__); + + /* select ecc-scheme for NAND */ + if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) { + pr_err("%s: ti,nand-ecc-opt not found\n", __func__); + return -ENODEV; + } + + if (!strcmp(s, "sw")) + gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW; + else if (!strcmp(s, "ham1") || + !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) + gpmc_nand_data->ecc_opt = + OMAP_ECC_HAM1_CODE_HW; + else if (!strcmp(s, "bch4")) + if (gpmc_nand_data->elm_of_node) + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH4_CODE_HW; + else + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH4_CODE_HW_DETECTION_SW; + else if (!strcmp(s, "bch8")) + if (gpmc_nand_data->elm_of_node) + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH8_CODE_HW; + else + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH8_CODE_HW_DETECTION_SW; + else if (!strcmp(s, "bch16")) + if (gpmc_nand_data->elm_of_node) + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH16_CODE_HW; + else + pr_err("%s: BCH16 requires ELM support\n", __func__); + else + pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__); + + /* select data transfer mode for NAND controller */ + if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) + for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++) + if (!strcasecmp(s, nand_xfer_types[val])) { + gpmc_nand_data->xfer_type = val; + break; + } + + gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child); + + val = of_get_nand_bus_width(child); + if (val == 16) + gpmc_nand_data->devsize = NAND_BUSWIDTH_16; + + gpmc_read_timings_dt(child, &gpmc_t); + gpmc_nand_init(gpmc_nand_data, &gpmc_t); + + return 0; +} +#else +static int gpmc_probe_nand_child(struct platform_device *pdev, + struct device_node *child) +{ + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_MTD_ONENAND) +static int gpmc_probe_onenand_child(struct platform_device *pdev, + struct device_node *child) +{ + u32 val; + struct omap_onenand_platform_data *gpmc_onenand_data; + + if (of_property_read_u32(child, "reg", &val) < 0) { + dev_err(&pdev->dev, "%s has no 'reg' property\n", + child->full_name); + return -ENODEV; + } + + gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data), + GFP_KERNEL); + if (!gpmc_onenand_data) + return -ENOMEM; + + gpmc_onenand_data->cs = val; + gpmc_onenand_data->of_node = child; + gpmc_onenand_data->dma_channel = -1; + 
+ if (!of_property_read_u32(child, "dma-channel", &val)) + gpmc_onenand_data->dma_channel = val; + + gpmc_onenand_init(gpmc_onenand_data); + + return 0; +} +#else +static int gpmc_probe_onenand_child(struct platform_device *pdev, + struct device_node *child) +{ + return 0; +} +#endif + +/** + * gpmc_probe_generic_child - configures the gpmc for a child device + * @pdev: pointer to gpmc platform device + * @child: pointer to device-tree node for child device + * + * Allocates and configures a GPMC chip-select for a child device. + * Returns 0 on success and appropriate negative error code on failure. + */ +static int gpmc_probe_generic_child(struct platform_device *pdev, + struct device_node *child) +{ + struct gpmc_settings gpmc_s; + struct gpmc_timings gpmc_t; + struct resource res; + unsigned long base; + const char *name; + int ret, cs; + u32 val; + + if (of_property_read_u32(child, "reg", &cs) < 0) { + dev_err(&pdev->dev, "%s has no 'reg' property\n", + child->full_name); + return -ENODEV; + } + + if (of_address_to_resource(child, 0, &res) < 0) { + dev_err(&pdev->dev, "%s has malformed 'reg' property\n", + child->full_name); + return -ENODEV; + } + + /* + * Check if we have multiple instances of the same device + * on a single chip select. If so, use the already initialized + * timings. + */ + name = gpmc_cs_get_name(cs); + if (name && child->name && of_node_cmp(child->name, name) == 0) + goto no_timings; + + ret = gpmc_cs_request(cs, resource_size(&res), &base); + if (ret < 0) { + dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs); + return ret; + } + gpmc_cs_set_name(cs, child->name); + + gpmc_read_settings_dt(child, &gpmc_s); + gpmc_read_timings_dt(child, &gpmc_t); + + /* + * For some GPMC devices we still need to rely on the bootloader + * timings because the devices can be connected via FPGA. + * REVISIT: Add timing support from slls644g.pdf. + */ + if (!gpmc_t.cs_rd_off) { + WARN(1, "enable GPMC debug to configure .dts timings for CS%i\n", + cs); + gpmc_cs_show_timings(cs, + "please add GPMC bootloader timings to .dts"); + goto no_timings; + } + + /* CS must be disabled while making changes to gpmc configuration */ + gpmc_cs_disable_mem(cs); + + /* + * FIXME: gpmc_cs_request() will map the CS to an arbitary + * location in the gpmc address space. When booting with + * device-tree we want the NOR flash to be mapped to the + * location specified in the device-tree blob. So remap the + * CS to this location. Once DT migration is complete should + * just make gpmc_cs_request() map a specific address. + */ + ret = gpmc_cs_remap(cs, res.start); + if (ret < 0) { + dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n", + cs, &res.start); + goto err; + } + + ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width); + if (ret < 0) + goto err; + + ret = gpmc_cs_program_settings(cs, &gpmc_s); + if (ret < 0) + goto err; + + ret = gpmc_cs_set_timings(cs, &gpmc_t); + if (ret) { + dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n", + child->name); + goto err; + } + + /* Clear limited address i.e. 
enable A26-A11 */ + val = gpmc_read_reg(GPMC_CONFIG); + val &= ~GPMC_CONFIG_LIMITEDADDRESS; + gpmc_write_reg(GPMC_CONFIG, val); + + /* Enable CS region */ + gpmc_cs_enable_mem(cs); + +no_timings: + if (of_platform_device_create(child, NULL, &pdev->dev)) + return 0; + + dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name); + ret = -ENODEV; + +err: + gpmc_cs_free(cs); + + return ret; +} + +static int gpmc_probe_dt(struct platform_device *pdev) +{ + int ret; + struct device_node *child; + const struct of_device_id *of_id = + of_match_device(gpmc_dt_ids, &pdev->dev); + + if (!of_id) + return 0; + + ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-cs", + &gpmc_cs_num); + if (ret < 0) { + pr_err("%s: number of chip-selects not defined\n", __func__); + return ret; + } else if (gpmc_cs_num < 1) { + pr_err("%s: all chip-selects are disabled\n", __func__); + return -EINVAL; + } else if (gpmc_cs_num > GPMC_CS_NUM) { + pr_err("%s: number of supported chip-selects cannot be > %d\n", + __func__, GPMC_CS_NUM); + return -EINVAL; + } + + ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins", + &gpmc_nr_waitpins); + if (ret < 0) { + pr_err("%s: number of wait pins not found!\n", __func__); + return ret; + } + + for_each_available_child_of_node(pdev->dev.of_node, child) { + + if (!child->name) + continue; + + if (of_node_cmp(child->name, "nand") == 0) + ret = gpmc_probe_nand_child(pdev, child); + else if (of_node_cmp(child->name, "onenand") == 0) + ret = gpmc_probe_onenand_child(pdev, child); + else if (of_node_cmp(child->name, "ethernet") == 0 || + of_node_cmp(child->name, "nor") == 0 || + of_node_cmp(child->name, "uart") == 0) + ret = gpmc_probe_generic_child(pdev, child); + + if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", + __func__, child->full_name)) + of_node_put(child); + } + + return 0; +} +#else +static int gpmc_probe_dt(struct platform_device *pdev) +{ + return 0; +} +#endif + +static int gpmc_probe(struct platform_device *pdev) +{ + int rc; + u32 l; + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) + return -ENOENT; + + phys_base = res->start; + mem_size = resource_size(res); + + gpmc_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpmc_base)) + return PTR_ERR(gpmc_base); + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (res == NULL) + dev_warn(&pdev->dev, "Failed to get resource: irq\n"); + else + gpmc_irq = res->start; + + gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(gpmc_l3_clk)) { + dev_err(&pdev->dev, "Failed to get GPMC fck\n"); + gpmc_irq = 0; + return PTR_ERR(gpmc_l3_clk); + } + + if (!clk_get_rate(gpmc_l3_clk)) { + dev_err(&pdev->dev, "Invalid GPMC fck clock rate\n"); + return -EINVAL; + } + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + gpmc_dev = &pdev->dev; + + l = gpmc_read_reg(GPMC_REVISION); + + /* + * FIXME: Once device-tree migration is complete the below flags + * should be populated based upon the device-tree compatible + * string. For now just use the IP revision. OMAP3+ devices have + * the wr_access and wr_data_mux_bus register fields. OMAP4+ + * devices support the addr-addr-data multiplex protocol. 
+ * + * GPMC IP revisions: + * - OMAP24xx = 2.0 + * - OMAP3xxx = 5.0 + * - OMAP44xx/54xx/AM335x = 6.0 + */ + if (GPMC_REVISION_MAJOR(l) > 0x4) + gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS; + if (GPMC_REVISION_MAJOR(l) > 0x5) + gpmc_capability |= GPMC_HAS_MUX_AAD; + dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), + GPMC_REVISION_MINOR(l)); + + gpmc_mem_init(); + + if (gpmc_setup_irq() < 0) + dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); + + if (!pdev->dev.of_node) { + gpmc_cs_num = GPMC_CS_NUM; + gpmc_nr_waitpins = GPMC_NR_WAITPINS; + } + + rc = gpmc_probe_dt(pdev); + if (rc < 0) { + pm_runtime_put_sync(&pdev->dev); + dev_err(gpmc_dev, "failed to probe DT parameters\n"); + return rc; + } + + return 0; +} + +static int gpmc_remove(struct platform_device *pdev) +{ + gpmc_free_irq(); + gpmc_mem_exit(); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + gpmc_dev = NULL; + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int gpmc_suspend(struct device *dev) +{ + omap3_gpmc_save_context(); + pm_runtime_put_sync(dev); + return 0; +} + +static int gpmc_resume(struct device *dev) +{ + pm_runtime_get_sync(dev); + omap3_gpmc_restore_context(); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume); + +static struct platform_driver gpmc_driver = { + .probe = gpmc_probe, + .remove = gpmc_remove, + .driver = { + .name = DEVICE_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(gpmc_dt_ids), + .pm = &gpmc_pm_ops, + }, +}; + +static __init int gpmc_init(void) +{ + return platform_driver_register(&gpmc_driver); +} + +static __exit void gpmc_exit(void) +{ + platform_driver_unregister(&gpmc_driver); + +} + +postcore_initcall(gpmc_init); +module_exit(gpmc_exit); + +static irqreturn_t gpmc_handle_irq(int irq, void *dev) +{ + int i; + u32 regval; + + regval = gpmc_read_reg(GPMC_IRQSTATUS); + + if (!regval) + return IRQ_NONE; + + for (i = 0; i < GPMC_NR_IRQ; i++) + if (regval & gpmc_client_irq[i].bitmask) + generic_handle_irq(gpmc_client_irq[i].irq); + + gpmc_write_reg(GPMC_IRQSTATUS, regval); + + return IRQ_HANDLED; +} + +static struct omap3_gpmc_regs gpmc_context; + +void omap3_gpmc_save_context(void) +{ + int i; + + gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); + gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); + gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); + gpmc_context.config = gpmc_read_reg(GPMC_CONFIG); + gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1); + gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2); + gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL); + for (i = 0; i < gpmc_cs_num; i++) { + gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i); + if (gpmc_context.cs_context[i].is_valid) { + gpmc_context.cs_context[i].config1 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG1); + gpmc_context.cs_context[i].config2 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG2); + gpmc_context.cs_context[i].config3 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG3); + gpmc_context.cs_context[i].config4 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG4); + gpmc_context.cs_context[i].config5 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG5); + gpmc_context.cs_context[i].config6 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG6); + gpmc_context.cs_context[i].config7 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG7); + } + } +} + +void omap3_gpmc_restore_context(void) +{ + int i; + + gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); + gpmc_write_reg(GPMC_IRQENABLE, 
gpmc_context.irqenable); + gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); + gpmc_write_reg(GPMC_CONFIG, gpmc_context.config); + gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1); + gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2); + gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control); + for (i = 0; i < gpmc_cs_num; i++) { + if (gpmc_context.cs_context[i].is_valid) { + gpmc_cs_write_reg(i, GPMC_CS_CONFIG1, + gpmc_context.cs_context[i].config1); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG2, + gpmc_context.cs_context[i].config2); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG3, + gpmc_context.cs_context[i].config3); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG4, + gpmc_context.cs_context[i].config4); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG5, + gpmc_context.cs_context[i].config5); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG6, + gpmc_context.cs_context[i].config6); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, + gpmc_context.cs_context[i].config7); + } + } +} -- cgit v1.2.3-59-g8ed1b From 007f790c8276271de26416f90d55561bcc96588a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 28 Nov 2014 14:34:17 +0100 Subject: net: introduce generic switch devices support The goal of this is to make it possible to support various switch chips. Drivers should implement the relevant ndos to do so. For now there is only one ndo defined: - for getting the physical switch id. Note that the user can use an arbitrary port netdevice to access the switch. Signed-off-by: Jiri Pirko Reviewed-by: Thomas Graf Acked-by: Andy Gospodarek Signed-off-by: David S. Miller --- Documentation/networking/switchdev.txt | 59 ++++++++++++++++++++++++++++++++++ MAINTAINERS | 7 ++++ include/linux/netdevice.h | 10 ++++++ include/net/switchdev.h | 30 +++++++++++++++++ net/Kconfig | 1 + net/Makefile | 3 ++ net/switchdev/Kconfig | 13 ++++++++ net/switchdev/Makefile | 5 +++ net/switchdev/switchdev.c | 33 +++++++++++++++++++ 9 files changed, 161 insertions(+) create mode 100644 Documentation/networking/switchdev.txt create mode 100644 include/net/switchdev.h create mode 100644 net/switchdev/Kconfig create mode 100644 net/switchdev/Makefile create mode 100644 net/switchdev/switchdev.c (limited to 'MAINTAINERS') diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt new file mode 100644 index 000000000000..f981a9295a39 --- /dev/null +++ b/Documentation/networking/switchdev.txt @@ -0,0 +1,59 @@ +Switch (and switch-ish) device drivers HOWTO +============================================ + +Please note that the word "switch" is used here in a very generic sense. +This includes devices supporting L2/L3, but also various flow offloading chips, +including switches embedded into SR-IOV NICs. + +Let's describe a topology a bit. Imagine the following example: + + +----------------------------+ +---------------+ + | SOME switch chip | | CPU | + +----------------------------+ +---------------+ + port1 port2 port3 port4 MNGMNT | PCI-E | + | | | | | +---------------+ + PHY PHY | | | | NIC0 NIC1 + | | | | | | + | | +- PCI-E -+ | | + | +------- MII -------+ | + +------------- MII ------------+ + +In this example, there are two independent lines between the switch silicon +and the CPU. The NIC0 and NIC1 drivers are not aware of the switch's presence. They are +separate from the switch driver. SOME switch chip is managed by a driver +via the PCI-E device MNGMNT. Note that the MNGMNT device, NIC0 and NIC1 may be +connected to some other type of bus.
+ +Now, let's show how the previous example is represented in the kernel: + + +----------------------------+ +---------------+ + | SOME switch chip | | CPU | + +----------------------------+ +---------------+ + sw0p0 sw0p1 sw0p2 sw0p3 MNGMNT | PCI-E | + | | | | | +---------------+ + PHY PHY | | | | eth0 eth1 + | | | | | | + | | +- PCI-E -+ | | + | +------- MII -------+ | + +------------- MII ------------+ + +Let's call the example switch driver for SOME switch chip "SOMEswitch". This +driver takes care of the PCI-E device MNGMNT. There is a netdevice instance sw0pX +created for each port of the switch. These netdevices are instances +of the "SOMEswitch" driver. The sw0pX netdevices serve as a "representation" +of the switch chip. eth0 and eth1 are instances of some other existing driver. + +The only difference between a switch-port netdevice and an ordinary netdevice +is that it implements a couple more NDOs: + + ndo_switch_parent_id_get - This returns the same ID for two port netdevices + of the same physical switch chip. It is + mandatory for all switch drivers to implement it, + and it lets the caller recognize a port + netdevice. + ndo_switch_parent_* - Functions that serve for manipulation of the switch + chip itself (it can be thought of as a "parent" of the + port, hence the name). They are not port-specific. + The caller may use an arbitrary port netdevice of the same + switch and it will make no difference. + ndo_switch_port_* - Functions that serve for port-specific manipulation. diff --git a/MAINTAINERS b/MAINTAINERS index 6b880deae3d2..3aba0ac2d5a3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9059,6 +9059,13 @@ F: lib/swiotlb.c F: arch/*/kernel/pci-swiotlb.c F: include/linux/swiotlb.h +SWITCHDEV +M: Jiri Pirko +L: netdev@vger.kernel.org +S: Supported +F: net/switchdev/ +F: include/net/switchdev.h + SYNOPSYS ARC ARCHITECTURE M: Vineet Gupta S: Supported diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 4bd41d72559d..3603f31e78f3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1018,6 +1018,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * performing GSO on a packet. The device returns true if it is * able to GSO the packet, false otherwise. If the return value is * false the stack will do software GSO. + * + * int (*ndo_switch_parent_id_get)(struct net_device *dev, + * struct netdev_phys_item_id *psid); + * Called to get an ID of the switch chip this port is part of. + * If a driver implements this, it indicates that the device represents a port + * of a switch chip. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1171,6 +1177,10 @@ struct net_device_ops { int (*ndo_get_lock_subclass)(struct net_device *dev); bool (*ndo_gso_check) (struct sk_buff *skb, struct net_device *dev); +#ifdef CONFIG_NET_SWITCHDEV + int (*ndo_switch_parent_id_get)(struct net_device *dev, + struct netdev_phys_item_id *psid); +#endif }; /** diff --git a/include/net/switchdev.h b/include/net/switchdev.h new file mode 100644 index 000000000000..7a52360a1446 --- /dev/null +++ b/include/net/switchdev.h @@ -0,0 +1,30 @@ +/* + * include/net/switchdev.h - Switch device API + * Copyright (c) 2014 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
+ */ +#ifndef _LINUX_SWITCHDEV_H_ +#define _LINUX_SWITCHDEV_H_ + +#include + +#ifdef CONFIG_NET_SWITCHDEV + +int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid); + +#else + +static inline int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + return -EOPNOTSUPP; +} + +#endif + +#endif /* _LINUX_SWITCHDEV_H_ */ diff --git a/net/Kconfig b/net/Kconfig index 99815b5454bf..ff9ffc17fa0e 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -228,6 +228,7 @@ source "net/vmw_vsock/Kconfig" source "net/netlink/Kconfig" source "net/mpls/Kconfig" source "net/hsr/Kconfig" +source "net/switchdev/Kconfig" config RPS boolean diff --git a/net/Makefile b/net/Makefile index 7ed1970074b0..95fc694e4ddc 100644 --- a/net/Makefile +++ b/net/Makefile @@ -73,3 +73,6 @@ obj-$(CONFIG_OPENVSWITCH) += openvswitch/ obj-$(CONFIG_VSOCKETS) += vmw_vsock/ obj-$(CONFIG_NET_MPLS_GSO) += mpls/ obj-$(CONFIG_HSR) += hsr/ +ifneq ($(CONFIG_NET_SWITCHDEV),) +obj-y += switchdev/ +endif diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig new file mode 100644 index 000000000000..155754588fd6 --- /dev/null +++ b/net/switchdev/Kconfig @@ -0,0 +1,13 @@ +# +# Configuration for Switch device support +# + +config NET_SWITCHDEV + boolean "Switch (and switch-ish) device support (EXPERIMENTAL)" + depends on INET + ---help--- + This module provides glue between core networking code and device + drivers in order to support hardware switch chips in a very generic + sense of the word "switch". This includes devices supporting L2/L3 but + also various flow offloading chips, including switches embedded into + SR-IOV NICs. diff --git a/net/switchdev/Makefile b/net/switchdev/Makefile new file mode 100644 index 000000000000..5ed63ed324d0 --- /dev/null +++ b/net/switchdev/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Switch device API +# + +obj-$(CONFIG_NET_SWITCHDEV) += switchdev.o diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c new file mode 100644 index 000000000000..66973deaae56 --- /dev/null +++ b/net/switchdev/switchdev.c @@ -0,0 +1,33 @@ +/* + * net/switchdev/switchdev.c - Switch device API + * Copyright (c) 2014 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +/** + * netdev_switch_parent_id_get - Get ID of a switch + * @dev: port device + * @psid: switch ID + * + * Get ID of a switch this port is part of. + */ +int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + const struct net_device_ops *ops = dev->netdev_ops; + + if (!ops->ndo_switch_parent_id_get) + return -EOPNOTSUPP; + return ops->ndo_switch_parent_id_get(dev, psid); +} +EXPORT_SYMBOL(netdev_switch_parent_id_get); -- cgit v1.2.3-59-g8ed1b From 4b8ac9660af07be6f6602b523929793eed314997 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 28 Nov 2014 14:34:26 +0100 Subject: rocker: introduce rocker switch driver This patch introduces the first driver to benefit from the switchdev infrastructure and to implement the newly introduced switch ndos. This is a driver for an emulated switch chip implemented in qemu: https://github.com/sfeldma/qemu-rocker/ This patch is a result of joint work with Scott Feldman.
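For reference, this is roughly what a port driver has to provide in order to plug into the new API. A minimal sketch only (not taken from rocker.c; the mydrv_* names and the switch_id field are hypothetical):

	#include <linux/netdevice.h>
	#include <linux/string.h>

	struct mydrv_port {
		u64 switch_id;	/* same value on every port of one chip */
	};

	static int mydrv_port_switch_parent_id_get(struct net_device *dev,
						   struct netdev_phys_item_id *psid)
	{
		struct mydrv_port *port = netdev_priv(dev);

		/* Report the same ID for all ports of this physical switch */
		psid->id_len = sizeof(port->switch_id);
		memcpy(psid->id, &port->switch_id, psid->id_len);
		return 0;
	}

	static const struct net_device_ops mydrv_netdev_ops = {
		/* the usual ndo_open/ndo_stop/ndo_start_xmit are omitted here */
		.ndo_switch_parent_id_get = mydrv_port_switch_parent_id_get,
	};

netdev_switch_parent_id_get() returns -EOPNOTSUPP for netdevices that do not set this ndo, which is how a caller tells switch ports apart from ordinary NICs.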
Signed-off-by: Scott Feldman Signed-off-by: Jiri Pirko Reviewed-by: Thomas Graf Reviewed-by: John Fastabend Signed-off-by: David S. Miller --- MAINTAINERS | 7 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/rocker/Kconfig | 27 + drivers/net/ethernet/rocker/Makefile | 5 + drivers/net/ethernet/rocker/rocker.c | 2060 ++++++++++++++++++++++++++++++++++ drivers/net/ethernet/rocker/rocker.h | 427 +++++++ 7 files changed, 2528 insertions(+) create mode 100644 drivers/net/ethernet/rocker/Kconfig create mode 100644 drivers/net/ethernet/rocker/Makefile create mode 100644 drivers/net/ethernet/rocker/rocker.c create mode 100644 drivers/net/ethernet/rocker/rocker.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3aba0ac2d5a3..c95153559ed2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7865,6 +7865,13 @@ F: drivers/hid/hid-roccat* F: include/linux/hid-roccat* F: Documentation/ABI/*/sysfs-driver-hid-roccat* +ROCKER DRIVER +M: Jiri Pirko +M: Scott Feldman +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/ethernet/rocker/ + ROCKETPORT DRIVER P: Comtrol Corp. W: http://www.comtrol.com diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 1ed1fbba5d58..df76050d0a9d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -155,6 +155,7 @@ source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" +source "drivers/net/ethernet/rocker/Kconfig" config S6GMAC tristate "S6105 GMAC ethernet support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 6e0b629e9859..bf56f8b36e90 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_SH_ETH) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ +obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ obj-$(CONFIG_S6GMAC) += s6gmac.o obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig new file mode 100644 index 000000000000..11a850eab628 --- /dev/null +++ b/drivers/net/ethernet/rocker/Kconfig @@ -0,0 +1,27 @@ +# +# Rocker device configuration +# + +config NET_VENDOR_ROCKER + bool "Rocker devices" + default y + ---help--- + If you have a network device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Rocker devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ROCKER + +config ROCKER + tristate "Rocker switch driver (EXPERIMENTAL)" + depends on PCI && NET_SWITCHDEV + ---help--- + This driver supports Rocker switch device. + + To compile this driver as a module, choose M here: the + module will be called rocker. + +endif # NET_VENDOR_ROCKER diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile new file mode 100644 index 000000000000..f85fb12f36f1 --- /dev/null +++ b/drivers/net/ethernet/rocker/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Rocker network device drivers. 
+# + +obj-$(CONFIG_ROCKER) += rocker.o diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c new file mode 100644 index 000000000000..a53011c2b4aa --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.c @@ -0,0 +1,2060 @@ +/* + * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver + * Copyright (c) 2014 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rocker.h" + +static const char rocker_driver_name[] = "rocker"; + +static const struct pci_device_id rocker_pci_id_table[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, + {0, } +}; + +struct rocker_desc_info { + char *data; /* mapped */ + size_t data_size; + size_t tlv_size; + struct rocker_desc *desc; + DEFINE_DMA_UNMAP_ADDR(mapaddr); +}; + +struct rocker_dma_ring_info { + size_t size; + u32 head; + u32 tail; + struct rocker_desc *desc; /* mapped */ + dma_addr_t mapaddr; + struct rocker_desc_info *desc_info; + unsigned int type; +}; + +struct rocker; + +struct rocker_port { + struct net_device *dev; + struct rocker *rocker; + unsigned int port_number; + u32 lport; + struct napi_struct napi_tx; + struct napi_struct napi_rx; + struct rocker_dma_ring_info tx_ring; + struct rocker_dma_ring_info rx_ring; +}; + +struct rocker { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct msix_entry *msix_entries; + unsigned int port_count; + struct rocker_port **ports; + struct { + u64 id; + } hw; + spinlock_t cmd_ring_lock; + struct rocker_dma_ring_info cmd_ring; + struct rocker_dma_ring_info event_ring; +}; + +struct rocker_wait { + wait_queue_head_t wait; + bool done; + bool nowait; +}; + +static void rocker_wait_reset(struct rocker_wait *wait) +{ + wait->done = false; + wait->nowait = false; +} + +static void rocker_wait_init(struct rocker_wait *wait) +{ + init_waitqueue_head(&wait->wait); + rocker_wait_reset(wait); +} + +static struct rocker_wait *rocker_wait_create(gfp_t gfp) +{ + struct rocker_wait *wait; + + wait = kmalloc(sizeof(*wait), gfp); + if (!wait) + return NULL; + rocker_wait_init(wait); + return wait; +} + +static void rocker_wait_destroy(struct rocker_wait *work) +{ + kfree(work); +} + +static bool rocker_wait_event_timeout(struct rocker_wait *wait, + unsigned long timeout) +{ + wait_event_timeout(wait->wait, wait->done, HZ / 10); + if (!wait->done) + return false; + return true; +} + +static void rocker_wait_wake_up(struct rocker_wait *wait) +{ + wait->done = true; + wake_up(&wait->wait); +} + +static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector) +{ + return rocker->msix_entries[vector].vector; +} + +static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_TX(rocker_port->port_number)); +} + +static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_RX(rocker_port->port_number)); +} + +#define rocker_write32(rocker, reg, val) \ + writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define 
rocker_read32(rocker, reg) \ + readl((rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_write64(rocker, reg, val) \ + writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read64(rocker, reg) \ + readq((rocker)->hw_addr + (ROCKER_ ## reg)) + +/***************************** + * HW basic testing functions + *****************************/ + +static int rocker_reg_test(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + u64 test_reg; + u64 rnd; + + rnd = prandom_u32(); + rnd >>= 1; + rocker_write32(rocker, TEST_REG, rnd); + test_reg = rocker_read32(rocker, TEST_REG); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", + test_reg, rnd * 2); + return -EIO; + } + + rnd = prandom_u32(); + rnd <<= 31; + rnd |= prandom_u32(); + rocker_write64(rocker, TEST_REG64, rnd); + test_reg = rocker_read64(rocker, TEST_REG64); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", + test_reg, rnd * 2); + return -EIO; + } + + return 0; +} + +static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait, + u32 test_type, dma_addr_t dma_handle, + unsigned char *buf, unsigned char *expect, + size_t size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + rocker_wait_reset(wait); + rocker_write32(rocker, TEST_DMA_CTRL, test_type); + + if (!rocker_wait_event_timeout(wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + return -EIO; + } + + for (i = 0; i < size; i++) { + if (buf[i] != expect[i]) { + dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", + buf[i], i, expect[i]); + return -EIO; + } + } + return 0; +} + +#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) +#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 + +static int rocker_dma_test_offset(struct rocker *rocker, + struct rocker_wait *wait, int offset) +{ + struct pci_dev *pdev = rocker->pdev; + unsigned char *alloc; + unsigned char *buf; + unsigned char *expect; + dma_addr_t dma_handle; + int i; + int err; + + alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, + GFP_KERNEL | GFP_DMA); + if (!alloc) + return -ENOMEM; + buf = alloc + offset; + expect = buf + ROCKER_TEST_DMA_BUF_SIZE; + + dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pdev, dma_handle)) { + err = -EIO; + goto free_alloc; + } + + rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); + rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); + + memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); + for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) + expect[i] = ~buf[i]; + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + +unmap: + pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); +free_alloc: + kfree(alloc); + + return err; +} + +static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait) +{ + int i; + int err; + + for (i = 0; i < 8; 
i++) { + err = rocker_dma_test_offset(rocker, wait, i); + if (err) + return err; + } + return 0; +} + +static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) +{ + struct rocker_wait *wait = dev_id; + + rocker_wait_wake_up(wait); + + return IRQ_HANDLED; +} + +static int rocker_basic_hw_test(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + struct rocker_wait wait; + int err; + + err = rocker_reg_test(rocker); + if (err) { + dev_err(&pdev->dev, "reg test failed\n"); + return err; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), + rocker_test_irq_handler, 0, + rocker_driver_name, &wait); + if (err) { + dev_err(&pdev->dev, "cannot assign test irq\n"); + return err; + } + + rocker_wait_init(&wait); + rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); + + if (!rocker_wait_event_timeout(&wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + err = -EIO; + goto free_irq; + } + + err = rocker_dma_test(rocker, &wait); + if (err) + dev_err(&pdev->dev, "dma test failed\n"); + +free_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); + return err; +} + +/****** + * TLV + ******/ + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) + +/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct rocker_tlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * <--------------------------- tlv->len --------------------------> + */ + +static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, + int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(tlv->len); + + *remaining -= totlen; + return (struct rocker_tlv *) ((char *) tlv + totlen); +} + +static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + tlv->len >= ROCKER_TLV_HDRLEN && + tlv->len <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ + rocker_tlv_len(tlv), rem) + +static int rocker_tlv_attr_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); +} + +static int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); +} + +static int rocker_tlv_type(const struct rocker_tlv *tlv) +{ + return tlv->type; +} + +static void *rocker_tlv_data(const struct rocker_tlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static int rocker_tlv_len(const struct rocker_tlv *tlv) +{ + return tlv->len - ROCKER_TLV_HDRLEN; +} + +static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) +{ + return *(u8 *) rocker_tlv_data(tlv); +} + +static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) +{ + return *(u16 *) rocker_tlv_data(tlv); +} + +static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) +{ + return *(u32 *) rocker_tlv_data(tlv); +} + +static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) +{ + return *(u64 *) rocker_tlv_data(tlv); +} + 
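+
+/* Worked size example (illustrative; assumes ROCKER_TLV_HDRLEN is 8 bytes):
+ * for a u32 attribute, rocker_tlv_attr_size(4) = 8 + 4 = 12, which is the
+ * value stored in tlv->len by rocker_tlv_put(), while
+ * rocker_tlv_total_size(4) = ROCKER_TLV_ALIGN(12) = 16, so
+ * rocker_tlv_padlen(4) leaves 4 bytes of zero padding after the payload.
+ */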
+static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const struct rocker_tlv *tlv; + const struct rocker_tlv *head = (const struct rocker_tlv *) buf; + int rem; + + memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + u32 type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) + tb[type] = (struct rocker_tlv *) tlv; + } +} + +static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype, + const struct rocker_tlv *tlv) +{ + rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), + rocker_tlv_len(tlv)); +} + +static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype, + struct rocker_desc_info *desc_info) +{ + rocker_tlv_parse(tb, maxtype, desc_info->data, + desc_info->desc->tlv_size); +} + +static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) +{ + return (struct rocker_tlv *) ((char *) desc_info->data + + desc_info->tlv_size); +} + +static int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data) +{ + int tail_room = desc_info->data_size - desc_info->tlv_size; + int total_size = rocker_tlv_total_size(attrlen); + struct rocker_tlv *tlv; + + if (unlikely(tail_room < total_size)) + return -EMSGSIZE; + + tlv = rocker_tlv_start(desc_info); + desc_info->tlv_size += total_size; + tlv->type = attrtype; + tlv->len = rocker_tlv_attr_size(attrlen); + memcpy(rocker_tlv_data(tlv), data, attrlen); + memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); + return 0; +} + +static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, + int attrtype, u8 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); +} + +static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, + int attrtype, u16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); +} + +static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, + int attrtype, u32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); +} + +static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, + int attrtype, u64 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); +} + +static struct rocker_tlv * +rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) +{ + struct rocker_tlv *start = rocker_tlv_start(desc_info); + + if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; +} + +static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + desc_info->tlv_size = (char *) start - desc_info->data; +} + +/****************************************** + * DMA rings and descriptors manipulations + ******************************************/ + +static u32 __pos_inc(u32 pos, size_t limit) +{ + return ++pos == limit ? 0 : pos; +} + +static int rocker_desc_err(struct rocker_desc_info *desc_info) +{ + return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN); +} + +static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) +{ + desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; +} + +static bool rocker_desc_gen(struct rocker_desc_info *desc_info) +{ + u32 comp_err = desc_info->desc->comp_err; + + return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? 
true : false; +} + +static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info) +{ + return (void *) desc_info->desc->cookie; +} + +static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info, + void *ptr) +{ + desc_info->desc->cookie = (long) ptr; +} + +static struct rocker_desc_info * +rocker_desc_head_get(struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + u32 head = __pos_inc(info->head, info->size); + + desc_info = &info->desc_info[info->head]; + if (head == info->tail) + return NULL; /* ring full */ + desc_info->tlv_size = 0; + return desc_info; +} + +static void rocker_desc_commit(struct rocker_desc_info *desc_info) +{ + desc_info->desc->buf_size = desc_info->data_size; + desc_info->desc->tlv_size = desc_info->tlv_size; +} + +static void rocker_desc_head_set(struct rocker *rocker, + struct rocker_dma_ring_info *info, + struct rocker_desc_info *desc_info) +{ + u32 head = __pos_inc(info->head, info->size); + + BUG_ON(head == info->tail); + rocker_desc_commit(desc_info); + info->head = head; + rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); +} + +static struct rocker_desc_info * +rocker_desc_tail_get(struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + + if (info->tail == info->head) + return NULL; /* nothing to be done between head and tail */ + desc_info = &info->desc_info[info->tail]; + if (!rocker_desc_gen(desc_info)) + return NULL; /* gen bit not set, desc is not ready yet */ + info->tail = __pos_inc(info->tail, info->size); + desc_info->tlv_size = desc_info->desc->tlv_size; + return desc_info; +} + +static void rocker_dma_ring_credits_set(struct rocker *rocker, + struct rocker_dma_ring_info *info, + u32 credits) +{ + if (credits) + rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); +} + +static unsigned long rocker_dma_ring_size_fix(size_t size) +{ + return max(ROCKER_DMA_SIZE_MIN, + min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); +} + +static int rocker_dma_ring_create(struct rocker *rocker, + unsigned int type, + size_t size, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(size != rocker_dma_ring_size_fix(size)); + info->size = size; + info->type = type; + info->head = 0; + info->tail = 0; + info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), + GFP_KERNEL); + if (!info->desc_info) + return -ENOMEM; + + info->desc = pci_alloc_consistent(rocker->pdev, + info->size * sizeof(*info->desc), + &info->mapaddr); + if (!info->desc) { + kfree(info->desc_info); + return -ENOMEM; + } + + for (i = 0; i < info->size; i++) + info->desc_info[i].desc = &info->desc[i]; + + rocker_write32(rocker, DMA_DESC_CTRL(info->type), + ROCKER_DMA_DESC_CTRL_RESET); + rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); + rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); + + return 0; +} + +static void rocker_dma_ring_destroy(struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); + + pci_free_consistent(rocker->pdev, + info->size * sizeof(struct rocker_desc), + info->desc, info->mapaddr); + kfree(info->desc_info); +} + +static void rocker_dma_ring_pass_to_producer(struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(info->head || info->tail); + + /* When ring is consumer, we need to advance head for each desc. + * That tells hw that the desc is ready to be used by it. 
+ */ + for (i = 0; i < info->size - 1; i++) + rocker_desc_head_set(rocker, info, &info->desc_info[i]); + rocker_desc_commit(&info->desc_info[i]); +} + +static int rocker_dma_ring_bufs_alloc(struct rocker *rocker, + struct rocker_dma_ring_info *info, + int direction, size_t buf_size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + int err; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + dma_addr_t dma_handle; + char *buf; + + buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); + if (!buf) { + err = -ENOMEM; + goto rollback; + } + + dma_handle = pci_map_single(pdev, buf, buf_size, direction); + if (pci_dma_mapping_error(pdev, dma_handle)) { + kfree(buf); + err = -EIO; + goto rollback; + } + + desc_info->data = buf; + desc_info->data_size = buf_size; + dma_unmap_addr_set(desc_info, mapaddr, dma_handle); + + desc->buf_addr = dma_handle; + desc->buf_size = buf_size; + } + return 0; + +rollback: + for (i--; i >= 0; i--) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } + return err; +} + +static void rocker_dma_ring_bufs_free(struct rocker *rocker, + struct rocker_dma_ring_info *info, + int direction) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + + desc->buf_addr = 0; + desc->buf_size = 0; + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } +} + +static int rocker_dma_rings_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, + ROCKER_DMA_CMD_DEFAULT_SIZE, + &rocker->cmd_ring); + if (err) { + dev_err(&pdev->dev, "failed to create command dma ring\n"); + return err; + } + + spin_lock_init(&rocker->cmd_ring_lock); + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); + goto err_dma_cmd_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, + ROCKER_DMA_EVENT_DEFAULT_SIZE, + &rocker->event_ring); + if (err) { + dev_err(&pdev->dev, "failed to create event dma ring\n"); + goto err_dma_event_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, + PCI_DMA_FROMDEVICE, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); + goto err_dma_event_ring_bufs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); + return 0; + +err_dma_event_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->event_ring); +err_dma_event_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_cmd_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); + return err; +} + +static void rocker_dma_rings_fini(struct rocker *rocker) +{ + rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->event_ring); + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); +} + +static int rocker_dma_rx_ring_skb_map(struct rocker *rocker, + struct 
rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + struct sk_buff *skb, size_t buf_len) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + + dma_handle = pci_map_single(pdev, skb->data, buf_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, dma_handle)) + return -EIO; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) + goto tlv_put_failure; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) + goto tlv_put_failure; + return 0; + +tlv_put_failure: + pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); + desc_info->tlv_size = 0; + return -EMSGSIZE; +} + +static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port) +{ + return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +} + +static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct net_device *dev = rocker_port->dev; + struct sk_buff *skb; + size_t buf_len = rocker_port_rx_buf_len(rocker_port); + int err; + + /* Ensure that hw will see tlv_size zero in case of an error. + * That tells hw to use another descriptor. + */ + rocker_desc_cookie_ptr_set(desc_info, NULL); + desc_info->tlv_size = 0; + + skb = netdev_alloc_skb_ip_align(dev, buf_len); + if (!skb) + return -ENOMEM; + err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info, + skb, buf_len); + if (err) { + dev_kfree_skb_any(skb); + return err; + } + rocker_desc_cookie_ptr_set(desc_info, skb); + return 0; +} + +static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker, + struct rocker_tlv **attrs) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + size_t len; + + if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || + !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) + return; + dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); + len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); + pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); +} + +static void rocker_dma_rx_ring_skb_free(struct rocker *rocker, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + + if (!skb) + return; + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + dev_kfree_skb_any(skb); +} + +static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + int i; + int err; + + for (i = 0; i < rx_ring->size; i++) { + err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, + &rx_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); + return err; +} + +static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + int i; + + for (i = 0; i < rx_ring->size; i++) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); +} + +static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + int err; + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_TX(rocker_port->port_number), + ROCKER_DMA_TX_DEFAULT_SIZE, + &rocker_port->tx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); + return err; + } + + err = 
rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE, + ROCKER_DMA_TX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); + goto err_dma_tx_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_RX(rocker_port->port_number), + ROCKER_DMA_RX_DEFAULT_SIZE, + &rocker_port->rx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); + goto err_dma_rx_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL, + ROCKER_DMA_RX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); + goto err_dma_rx_ring_bufs_alloc; + } + + err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); + goto err_dma_rx_ring_skbs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); + + return 0; + +err_dma_rx_ring_skbs_alloc: + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_rx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); +err_dma_rx_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); +err_dma_tx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); + return err; +} + +static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + + rocker_dma_rx_ring_skbs_free(rocker, rocker_port); + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); +} + +static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) +{ + u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); + + if (enable) + val |= 1 << rocker_port->lport; + else + val &= ~(1 << rocker_port->lport); + rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); +} + +/******************************** + * Interrupt handler and helpers + ********************************/ + +static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + u32 credits = 0; + + spin_lock(&rocker->cmd_ring_lock); + while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { + wait = rocker_desc_cookie_ptr_get(desc_info); + if (wait->nowait) { + rocker_desc_gen_clear(desc_info); + rocker_wait_destroy(wait); + } else { + rocker_wait_wake_up(wait); + } + credits++; + } + spin_unlock(&rocker->cmd_ring_lock); + rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); + + return IRQ_HANDLED; +} + +static void rocker_port_link_up(struct rocker_port *rocker_port) +{ + netif_carrier_on(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is up\n"); +} + +static void rocker_port_link_down(struct rocker_port *rocker_port) +{ + netif_carrier_off(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is down\n"); +} + +static int rocker_event_link_change(struct rocker *rocker, + const struct rocker_tlv *info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; + unsigned int port_number; + bool link_up; + struct rocker_port *rocker_port; + + rocker_tlv_parse_nested(attrs, 
ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] || + !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1; + link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + if (netif_carrier_ok(rocker_port->dev) != link_up) { + if (link_up) + rocker_port_link_up(rocker_port); + else + rocker_port_link_down(rocker_port); + } + + return 0; +} + +static int rocker_event_process(struct rocker *rocker, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; + struct rocker_tlv *info; + u16 type; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); + if (!attrs[ROCKER_TLV_EVENT_TYPE] || + !attrs[ROCKER_TLV_EVENT_INFO]) + return -EIO; + + type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); + info = attrs[ROCKER_TLV_EVENT_INFO]; + + switch (type) { + case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: + return rocker_event_link_change(rocker, info); + } + + return -EOPNOTSUPP; +} + +static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + struct pci_dev *pdev = rocker->pdev; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + dev_err(&pdev->dev, "event desc received with err %d\n", + err); + } else { + err = rocker_event_process(rocker, desc_info); + if (err) + dev_err(&pdev->dev, "event processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); + credits++; + } + rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); + + return IRQ_HANDLED; +} + +static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_tx); + return IRQ_HANDLED; +} + +static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_rx); + return IRQ_HANDLED; +} + +/******************** + * Command interface + ********************/ + +typedef int (*rocker_cmd_cb_t)(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv); + +static int rocker_cmd_exec(struct rocker *rocker, + struct rocker_port *rocker_port, + rocker_cmd_cb_t prepare, void *prepare_priv, + rocker_cmd_cb_t process, void *process_priv, + bool nowait) +{ + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + unsigned long flags; + int err; + + wait = rocker_wait_create(nowait ? 
GFP_ATOMIC : GFP_KERNEL); + if (!wait) + return -ENOMEM; + wait->nowait = nowait; + + spin_lock_irqsave(&rocker->cmd_ring_lock, flags); + desc_info = rocker_desc_head_get(&rocker->cmd_ring); + if (!desc_info) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + err = -EAGAIN; + goto out; + } + err = prepare(rocker, rocker_port, desc_info, prepare_priv); + if (err) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + goto out; + } + rocker_desc_cookie_ptr_set(desc_info, wait); + rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + + if (nowait) + return 0; + + if (!rocker_wait_event_timeout(wait, HZ / 10)) + return -EIO; + + err = rocker_desc_err(desc_info); + if (err) + return err; + + if (process) + err = process(rocker, rocker_port, desc_info, process_priv); + + rocker_desc_gen_clear(desc_info); +out: + rocker_wait_destroy(wait); + return err; +} + +static int +rocker_cmd_get_port_settings_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + u32 speed; + u8 duplex; + u8 autoneg; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) + return -EIO; + + speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); + duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); + autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = SUPPORTED_TP; + ecmd->phy_address = 0xff; + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; + ecmd->autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int +rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; + if (!attr) + return -EIO; + + if (rocker_tlv_len(attr) != ETH_ALEN) + return -EINVAL; + + ether_addr_copy(macaddr, rocker_tlv_data(attr)); + return 0; +} + +static int +rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, + ethtool_cmd_speed(ecmd))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, + ecmd->duplex)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, + ecmd->autoneg)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, + rocker_port->lport)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, + ETH_ALEN, macaddr)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_ethtool_proc, + ecmd, false); +} + +static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_macaddr_proc, + macaddr, false); +} + +static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_settings_ethtool_prep, + ecmd, NULL, NULL, false); +} + +static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + 
rocker_cmd_set_port_settings_macaddr_prep, + macaddr, NULL, NULL, false); +} + +/***************** + * Net device ops + *****************/ + +static int rocker_port_open(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_dma_rings_init(rocker_port); + if (err) + return err; + + err = request_irq(rocker_msix_tx_vector(rocker_port), + rocker_tx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign tx irq\n"); + goto err_request_tx_irq; + } + + err = request_irq(rocker_msix_rx_vector(rocker_port), + rocker_rx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign rx irq\n"); + goto err_request_rx_irq; + } + + napi_enable(&rocker_port->napi_tx); + napi_enable(&rocker_port->napi_rx); + rocker_port_set_enable(rocker_port, true); + netif_start_queue(dev); + return 0; + +err_request_rx_irq: + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); +err_request_tx_irq: + rocker_port_dma_rings_fini(rocker_port); + return err; +} + +static int rocker_port_stop(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + netif_stop_queue(dev); + rocker_port_set_enable(rocker_port, false); + napi_disable(&rocker_port->napi_rx); + napi_disable(&rocker_port->napi_tx); + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); + rocker_port_dma_rings_fini(rocker_port); + + return 0; +} + +static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; + struct rocker_tlv *attr; + int rem; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); + if (!attrs[ROCKER_TLV_TX_FRAGS]) + return; + rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { + struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; + dma_addr_t dma_handle; + size_t len; + + if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) + continue; + rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, + attr); + if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) + continue; + dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); + } +} + +static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + char *buf, size_t buf_len) +{ + struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + struct rocker_tlv *frag; + + dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); + return -EIO; + } + frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); + if (!frag) + goto unmap_frag; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, + dma_handle)) + goto nest_cancel; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, + buf_len)) + goto nest_cancel; + rocker_tlv_nest_end(desc_info, frag); + return 0; + +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frag); +unmap_frag: + pci_unmap_single(pdev, 
dma_handle, buf_len, DMA_TO_DEVICE); + return -EMSGSIZE; +} + +static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_tlv *frags; + int i; + int err; + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (unlikely(!desc_info)) { + if (net_ratelimit()) + netdev_err(dev, "tx ring full when queue awake\n"); + return NETDEV_TX_BUSY; + } + + rocker_desc_cookie_ptr_set(desc_info, skb); + + frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); + if (!frags) + goto out; + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb->data, skb_headlen(skb)); + if (err) + goto nest_cancel; + if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) + goto nest_cancel; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb_frag_address(frag), + skb_frag_size(frag)); + if (err) + goto unmap_frags; + } + rocker_tlv_nest_end(desc_info, frags); + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (!desc_info) + netif_stop_queue(dev); + + return NETDEV_TX_OK; + +unmap_frags: + rocker_tx_desc_frags_unmap(rocker_port, desc_info); +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frags); +out: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int rocker_port_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int rocker_port_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + + psid->id_len = sizeof(rocker->hw.id); + memcpy(&psid->id, &rocker->hw.id, psid->id_len); + return 0; +} + +static const struct net_device_ops rocker_port_netdev_ops = { + .ndo_open = rocker_port_open, + .ndo_stop = rocker_port_stop, + .ndo_start_xmit = rocker_port_xmit, + .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get, +}; + +/******************** + * ethtool interface + ********************/ + +static int rocker_port_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); +} + +static int rocker_port_set_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); +} + +static void rocker_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static const struct ethtool_ops rocker_port_ethtool_ops = { + .get_settings = rocker_port_get_settings, + .set_settings = rocker_port_set_settings, + .get_drvinfo = rocker_port_get_drvinfo, + 
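	/* stock ethtool helper: reports the carrier state kept up to
	 * date by the link-change event handler above
	 */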
.get_link = ethtool_op_get_link, +}; + +/***************** + * NAPI interface + *****************/ + +static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_tx); +} + +static int rocker_port_poll_tx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Cleanup tx descriptors */ + while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { + err = rocker_desc_err(desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "tx desc received with err %d\n", + err); + rocker_tx_desc_frags_unmap(rocker_port, desc_info); + dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info)); + credits++; + } + + if (credits && netif_queue_stopped(rocker_port->dev)) + netif_wake_queue(rocker_port->dev); + + napi_complete(napi); + rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); + + return 0; +} + +static int rocker_port_rx_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + size_t rx_len; + + if (!skb) + return -ENOENT; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) + return -EINVAL; + + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + + rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); + skb_put(skb, rx_len); + skb->protocol = eth_type_trans(skb, rocker_port->dev); + netif_receive_skb(skb); + + return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info); +} + +static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_rx); +} + +static int rocker_port_poll_rx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Process rx descriptors */ + while (credits < budget && + (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "rx desc received with err %d\n", + err); + } else { + err = rocker_port_rx_proc(rocker, rocker_port, + desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "rx processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); + credits++; + } + + if (credits < budget) + napi_complete(napi); + + rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); + + return credits; +} + +/***************** + * PCI driver ops + *****************/ + +static void rocker_carrier_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); + bool link_up; + + link_up = link_status & (1 << rocker_port->lport); + if (link_up) + netif_carrier_on(rocker_port->dev); + else + netif_carrier_off(rocker_port->dev); +} + +static void rocker_remove_ports(struct rocker *rocker) +{ + int i; + + for (i = 0; i < rocker->port_count; i++) + unregister_netdev(rocker->ports[i]->dev); + kfree(rocker->ports); +} + +static void 
rocker_port_dev_addr_init(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_cmd_get_port_settings_macaddr(rocker_port, + rocker_port->dev->dev_addr); + if (err) { + dev_warn(&pdev->dev, "failed to get mac address, using random\n"); + eth_hw_addr_random(rocker_port->dev); + } +} + +static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) +{ + struct pci_dev *pdev = rocker->pdev; + struct rocker_port *rocker_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct rocker_port)); + if (!dev) + return -ENOMEM; + rocker_port = netdev_priv(dev); + rocker_port->dev = dev; + rocker_port->rocker = rocker; + rocker_port->port_number = port_number; + rocker_port->lport = port_number + 1; + + rocker_port_dev_addr_init(rocker, rocker_port); + dev->netdev_ops = &rocker_port_netdev_ops; + dev->ethtool_ops = &rocker_port_ethtool_ops; + netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + NAPI_POLL_WEIGHT); + netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, + NAPI_POLL_WEIGHT); + rocker_carrier_init(rocker_port); + + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "register_netdev failed\n"); + goto err_register_netdev; + } + rocker->ports[port_number] = rocker_port; + + return 0; + +err_register_netdev: + free_netdev(dev); + return err; +} + +static int rocker_probe_ports(struct rocker *rocker) +{ + int i; + size_t alloc_size; + int err; + + alloc_size = sizeof(struct rocker_port *) * rocker->port_count; + rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + for (i = 0; i < rocker->port_count; i++) { + err = rocker_probe_port(rocker, i); + if (err) + goto remove_ports; + } + return 0; + +remove_ports: + rocker_remove_ports(rocker); + return err; +} + +static int rocker_msix_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int msix_entries; + int i; + int err; + + msix_entries = pci_msix_vec_count(pdev); + if (msix_entries < 0) + return msix_entries; + + if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) + return -EINVAL; + + rocker->msix_entries = kmalloc_array(msix_entries, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!rocker->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_entries; i++) + rocker->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); + if (err < 0) + goto err_enable_msix; + + return 0; + +err_enable_msix: + kfree(rocker->msix_entries); + return err; +} + +static void rocker_msix_fini(struct rocker *rocker) +{ + pci_disable_msix(rocker->pdev); + kfree(rocker->msix_entries); +} + +static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rocker *rocker; + int err; + + rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); + if (!rocker) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, rocker_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + 
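	/* neither a 64-bit nor a 32-bit DMA mask was accepted, so the
	 * device cannot be used; bail out
	 */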
dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } + + if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { + dev_err(&pdev->dev, "invalid PCI region size\n"); + goto err_pci_resource_len_check; + } + + rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rocker->hw_addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto err_ioremap; + } + pci_set_master(pdev); + + rocker->pdev = pdev; + pci_set_drvdata(pdev, rocker); + + rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); + + err = rocker_msix_init(rocker); + if (err) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_msix_init; + } + + err = rocker_basic_hw_test(rocker); + if (err) { + dev_err(&pdev->dev, "basic hw test failed\n"); + goto err_basic_hw_test; + } + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + + err = rocker_dma_rings_init(rocker); + if (err) + goto err_dma_rings_init; + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), + rocker_cmd_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign cmd irq\n"); + goto err_request_cmd_irq; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), + rocker_event_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign event irq\n"); + goto err_request_event_irq; + } + + rocker->hw.id = rocker_read64(rocker, SWITCH_ID); + + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + + dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id); + + return 0; + +err_probe_ports: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); +err_request_event_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); +err_request_cmd_irq: + rocker_dma_rings_fini(rocker); +err_dma_rings_init: +err_basic_hw_test: + rocker_msix_fini(rocker); +err_msix_init: + iounmap(rocker->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(rocker); + return err; +} + +static void rocker_remove(struct pci_dev *pdev) +{ + struct rocker *rocker = pci_get_drvdata(pdev); + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + rocker_remove_ports(rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); + rocker_dma_rings_fini(rocker); + rocker_msix_fini(rocker); + iounmap(rocker->hw_addr); + pci_release_regions(rocker->pdev); + pci_disable_device(rocker->pdev); + kfree(rocker); +} + +static struct pci_driver rocker_pci_driver = { + .name = rocker_driver_name, + .id_table = rocker_pci_id_table, + .probe = rocker_probe, + .remove = rocker_remove, +}; + +/*********************** + * Module init and exit + ***********************/ + +static int __init rocker_module_init(void) +{ + return pci_register_driver(&rocker_pci_driver); +} + +static void __exit rocker_module_exit(void) +{ + pci_unregister_driver(&rocker_pci_driver); +} + +module_init(rocker_module_init); +module_exit(rocker_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko "); +MODULE_AUTHOR("Scott Feldman "); +MODULE_DESCRIPTION("Rocker switch device driver"); +MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker.h 
b/drivers/net/ethernet/rocker/rocker.h new file mode 100644 index 000000000000..5251cf880b87 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.h @@ -0,0 +1,427 @@ +/* + * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver + * Copyright (c) 2014 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _ROCKER_H +#define _ROCKER_H + +#include + +#define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 + +#define ROCKER_PCI_BAR0_SIZE 0x2000 + +/* MSI-X vectors */ +enum { + ROCKER_MSIX_VEC_CMD, + ROCKER_MSIX_VEC_EVENT, + ROCKER_MSIX_VEC_TEST, + ROCKER_MSIX_VEC_RESERVED0, + __ROCKER_MSIX_VEC_TX, + __ROCKER_MSIX_VEC_RX, +#define ROCKER_MSIX_VEC_TX(port) \ + (__ROCKER_MSIX_VEC_TX + ((port) * 2)) +#define ROCKER_MSIX_VEC_RX(port) \ + (__ROCKER_MSIX_VEC_RX + ((port) * 2)) +#define ROCKER_MSIX_VEC_COUNT(portcnt) \ + (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) +}; + +/* Rocker bogus registers */ +#define ROCKER_BOGUS_REG0 0x0000 +#define ROCKER_BOGUS_REG1 0x0004 +#define ROCKER_BOGUS_REG2 0x0008 +#define ROCKER_BOGUS_REG3 0x000c + +/* Rocker test registers */ +#define ROCKER_TEST_REG 0x0010 +#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ +#define ROCKER_TEST_IRQ 0x0020 +#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ +#define ROCKER_TEST_DMA_SIZE 0x0030 +#define ROCKER_TEST_DMA_CTRL 0x0034 + +/* Rocker test register ctrl */ +#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0) +#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1) +#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2) + +/* Rocker DMA ring register offsets */ +#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ +#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) +#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) +#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) +#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) +#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) +#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) + +/* Rocker dma ctrl register bits */ +#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0) + +/* Rocker DMA ring types */ +enum rocker_dma_type { + ROCKER_DMA_CMD, + ROCKER_DMA_EVENT, + __ROCKER_DMA_TX, + __ROCKER_DMA_RX, +#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) +#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) +}; + +/* Rocker DMA ring size limits and default sizes */ +#define ROCKER_DMA_SIZE_MIN 2ul +#define ROCKER_DMA_SIZE_MAX 65536ul +#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul +#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul +#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_TX_DESC_SIZE 256 +#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_RX_DESC_SIZE 256 + +/* Rocker DMA descriptor struct */ +struct rocker_desc { + u64 buf_addr; + u64 cookie; + u16 buf_size; + u16 tlv_size; + u16 resv[5]; + u16 comp_err; +}; + +#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15) + +/* Rocker DMA TLV struct */ +struct rocker_tlv { + u32 type; + u16 len; +}; + +/* TLVs */ +enum { + ROCKER_TLV_CMD_UNSPEC, + ROCKER_TLV_CMD_TYPE, /* u16 */ + ROCKER_TLV_CMD_INFO, /* nest */ + + __ROCKER_TLV_CMD_MAX, + ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_TYPE_UNSPEC, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, + 
ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, + + __ROCKER_TLV_CMD_TYPE_MAX, + ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, + ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ + + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + ROCKER_TLV_CMD_PORT_SETTINGS_MAX = + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, +}; + +enum rocker_port_mode { + ROCKER_PORT_MODE_OF_DPA, +}; + +enum { + ROCKER_TLV_EVENT_UNSPEC, + ROCKER_TLV_EVENT_TYPE, /* u16 */ + ROCKER_TLV_EVENT_INFO, /* nest */ + + __ROCKER_TLV_EVENT_MAX, + ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_TYPE_UNSPEC, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, + + __ROCKER_TLV_EVENT_TYPE_MAX, + ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, + ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ + + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, + ROCKER_TLV_EVENT_LINK_CHANGED_MAX = + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, + ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ + ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ + + __ROCKER_TLV_EVENT_MAC_VLAN_MAX, + ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, +}; + +enum { + ROCKER_TLV_RX_UNSPEC, + ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ + ROCKER_TLV_RX_CSUM, /* u16 */ + ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ + ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ + ROCKER_TLV_RX_FRAG_LEN, /* u16 */ + + __ROCKER_TLV_RX_MAX, + ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, +}; + +#define ROCKER_RX_FLAGS_IPV4 (1 << 0) +#define ROCKER_RX_FLAGS_IPV6 (1 << 1) +#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2) +#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3) +#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4) +#define ROCKER_RX_FLAGS_TCP (1 << 5) +#define ROCKER_RX_FLAGS_UDP (1 << 6) +#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7) + +enum { + ROCKER_TLV_TX_UNSPEC, + ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ + ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ + ROCKER_TLV_TX_TSO_MSS, /* u16 */ + ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ + ROCKER_TLV_TX_FRAGS, /* array */ + + __ROCKER_TLV_TX_MAX, + ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, +}; + +#define ROCKER_TX_OFFLOAD_NONE 0 +#define ROCKER_TX_OFFLOAD_IP_CSUM 1 +#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 +#define ROCKER_TX_OFFLOAD_L3_CSUM 3 +#define ROCKER_TX_OFFLOAD_TSO 4 + +#define ROCKER_TX_FRAGS_MAX 16 + +enum { + ROCKER_TLV_TX_FRAG_UNSPEC, + ROCKER_TLV_TX_FRAG, /* nest */ + + __ROCKER_TLV_TX_FRAG_MAX, + ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, +}; + +enum { + ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, + ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ + ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ + + __ROCKER_TLV_TX_FRAG_ATTR_MAX, + ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, +}; + +/* cmd info nested for OF-DPA msgs */ +enum { + ROCKER_TLV_OF_DPA_UNSPEC, + 
ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ + ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ + ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ + ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ + ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ + ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ + ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ + ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ + ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ + ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ + ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ + ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ + ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ + ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ + + __ROCKER_TLV_OF_DPA_MAX, + ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, +}; + +/* OF-DPA table IDs */ + +enum rocker_of_dpa_table_id { + ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, + ROCKER_OF_DPA_TABLE_ID_VLAN = 10, + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, + ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, +}; + +/* OF-DPA flow stats */ +enum { + ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, + ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ + + __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, + ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, +}; + +/* OF-DPA group types */ +enum rocker_of_dpa_group_type { + 
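	/* encoded in the top 4 bits of the 32-bit group ID; see
	 * ROCKER_GROUP_TYPE_SHIFT and ROCKER_GROUP_TYPE_MASK below
	 */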
ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, + ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, + ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, + ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, +}; + +/* OF-DPA group L2 overlay types */ +enum rocker_of_dpa_overlay_type { + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, +}; + +/* OF-DPA group ID encoding */ +#define ROCKER_GROUP_TYPE_SHIFT 28 +#define ROCKER_GROUP_TYPE_MASK 0xf0000000 +#define ROCKER_GROUP_VLAN_SHIFT 16 +#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 +#define ROCKER_GROUP_PORT_SHIFT 0 +#define ROCKER_GROUP_PORT_MASK 0x0000ffff +#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 +#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 +#define ROCKER_GROUP_SUBTYPE_SHIFT 10 +#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 +#define ROCKER_GROUP_INDEX_SHIFT 0 +#define ROCKER_GROUP_INDEX_MASK 0x0000ffff +#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 +#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff + +#define ROCKER_GROUP_TYPE_GET(group_id) \ + (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) +#define ROCKER_GROUP_TYPE_SET(type) \ + (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) +#define ROCKER_GROUP_VLAN_GET(group_id) \ + (((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) +#define ROCKER_GROUP_VLAN_SET(vlan_id) \ + (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) +#define ROCKER_GROUP_PORT_GET(group_id) \ + (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) +#define ROCKER_GROUP_PORT_SET(port) \ + (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) +#define ROCKER_GROUP_INDEX_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) +#define ROCKER_GROUP_INDEX_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) +#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ + ROCKER_GROUP_INDEX_LONG_SHIFT) +#define ROCKER_GROUP_INDEX_LONG_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ + ROCKER_GROUP_INDEX_LONG_MASK) + +#define ROCKER_GROUP_NONE 0 +#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) +#define ROCKER_GROUP_L2_REWRITE(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) +#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L3_UNICAST(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) + +/* Rocker general purpose registers */ +#define ROCKER_CONTROL 0x0300 +#define ROCKER_PORT_PHYS_COUNT 0x0304 +#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ +#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ +#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ + +/* Rocker control bits */ +#define 
ROCKER_CONTROL_RESET (1 << 0) + +#endif -- cgit v1.2.3-59-g8ed1b From 0c570c183ace73f06e42d65432bf938fbdde6524 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Tue, 2 Dec 2014 16:41:35 +0100 Subject: cpuidle: add MAINTAINERS entry for ARM Exynos cpuidle driver Since there has been quite a lot of development going on for ARM Exynos cpuidle driver recently I would like to add separate MAINTAINERS entry for it and add myself as the primary maintainer. The merging process would remain (almost) unchanged with patches going (with my Ack) through Daniel's or Kukjin's tree. Signed-off-by: Bartlomiej Zolnierkiewicz Acked-by: Kyungmin Park Signed-off-by: Rafael J. Wysocki --- MAINTAINERS | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c444907ccd69..994a68f46926 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2632,6 +2632,16 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git S: Maintained F: drivers/cpuidle/cpuidle-big_little.c +CPUIDLE DRIVER - ARM EXYNOS +M: Bartlomiej Zolnierkiewicz +M: Daniel Lezcano +M: Kukjin Kim +L: linux-pm@vger.kernel.org +L: linux-samsung-soc@vger.kernel.org +S: Supported +F: drivers/cpuidle/cpuidle-exynos.c +F: arch/arm/mach-exynos/pm.c + CPUIDLE DRIVERS M: Rafael J. Wysocki M: Daniel Lezcano -- cgit v1.2.3-59-g8ed1b From 7e8f403fecd38d018a2c1868b29e9c1ef7d6fc56 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Mon, 1 Dec 2014 14:16:33 +0000 Subject: MAINTAINERS: ARM Versatile Express platform, add missing pattern VE's reset driver lives at the third level of the directories: drivers/power/reset/vexpress-poweroff.c and wasn't matched by the */*/vexpress* pattern. Added additional pattern for all files at this level. This should be enough for a while... Signed-off-by: Pawel Moll Signed-off-by: Arnd Bergmann --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 63542b9ec6cc..22e418b63f43 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1505,6 +1505,7 @@ S: Maintained F: arch/arm/boot/dts/vexpress* F: arch/arm/mach-vexpress/ F: */*/vexpress* +F: */*/*/vexpress* F: drivers/clk/versatile/clk-vexpress-osc.c F: drivers/clocksource/versatile.c -- cgit v1.2.3-59-g8ed1b From 9a4ea5a98652f602d4ec16957f64fd666e862b09 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 17 Nov 2014 09:17:49 -0300 Subject: [media] MAINTAINERS: Add myself as img-ir maintainer Add myself as the maintainer for the Imagination Technologies Infrared Decoder driver. Signed-off-by: James Hogan Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f6058ceb297d..9c49eb680020 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4771,6 +4771,11 @@ L: linux-security-module@vger.kernel.org S: Supported F: security/integrity/ima/ +IMGTEC IR DECODER DRIVER +M: James Hogan +S: Maintained +F: drivers/media/rc/img-ir/ + IMS TWINTURBO FRAMEBUFFER DRIVER L: linux-fbdev@vger.kernel.org S: Orphan -- cgit v1.2.3-59-g8ed1b From 7be6ff65ce7df4fc6bfd87d261a723a5558fee72 Mon Sep 17 00:00:00 2001 From: "John W. Linville" Date: Thu, 4 Dec 2014 16:52:41 -0500 Subject: MAINTAINERS: orphan rtl8180 Signed-off-by: John W. 
Linville --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 7ec37a396ffe..e3f40df47513 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7845,11 +7845,10 @@ S: Maintained F: drivers/media/dvb-frontends/rtl2832_sdr* RTL8180 WIRELESS DRIVER -M: "John W. Linville" L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git -S: Maintained +S: Orphan F: drivers/net/wireless/rtl818x/rtl8180/ RTL8187 WIRELESS DRIVER -- cgit v1.2.3-59-g8ed1b From bb1d5ddaf962921a5c3ad4166e3c1b7b70ba5778 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Mon, 10 Nov 2014 13:09:24 -0800 Subject: Update MAINTAINERS entry Update link to point to Steve's current tree on git.samba.org. Remove link to the old patchwork instance which shows no new patches since 2010. Signed-off-by: Kevin Cernekee Signed-off-by: Steve French --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c721042e7e45..f1e593297ddf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2504,8 +2504,7 @@ M: Steve French L: linux-cifs@vger.kernel.org L: samba-technical@lists.samba.org (moderated for non-subscribers) W: http://linux-cifs.samba.org/ -Q: http://patchwork.ozlabs.org/project/linux-cifs-client/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git +T: git git://git.samba.org/sfrench/cifs-2.6.git S: Supported F: Documentation/filesystems/cifs/ F: fs/cifs/ -- cgit v1.2.3-59-g8ed1b From 383b8fb946af329a3fce2446e7d385378d0f291f Mon Sep 17 00:00:00 2001 From: Ley Foon Tan Date: Thu, 6 Nov 2014 15:20:14 +0800 Subject: MAINTAINERS: Add nios2 maintainer Signed-off-by: Ley Foon Tan --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c721042e7e45..e56bace37ce0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6561,6 +6561,13 @@ S: Maintained F: Documentation/scsi/NinjaSCSI.txt F: drivers/scsi/nsp32* +NIOS2 ARCHITECTURE +M: Ley Foon Tan +L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) +T: git git://git.rocketboards.org/linux-socfpga.git +S: Maintained +F: arch/nios2/ + NTB DRIVER M: Jon Mason M: Dave Jiang -- cgit v1.2.3-59-g8ed1b From 40ed1b4caf76e27807a49d42a41f43e8393a7607 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 8 Dec 2014 10:53:13 +0100 Subject: MAINTAINERS: add I2C dt bindings also to I2C realm So get_maintainer.pl will add us when something gets updated there. 
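As the Versatile Express fix earlier in this series shows, a '*' in an F: pattern does not cross a directory separator, so a file three directories deep needs its own */*/*/vexpress* line, while a trailing-slash entry such as Documentation/devicetree/bindings/i2c/ covers everything below that directory. The sketch below approximates that component-wise matching with POSIX fnmatch(3) and FNM_PATHNAME; it is not the code get_maintainer.pl actually runs, and the sample paths are invented for illustration.

/*
 * Minimal sketch of component-wise glob matching in the spirit of
 * MAINTAINERS F: patterns.  Approximation only: built on POSIX
 * fnmatch(3) rather than get_maintainer.pl's own matcher, with
 * made-up sample paths.
 */
#include <fnmatch.h>
#include <stdio.h>

static void try_pattern(const char *pattern, const char *path)
{
	/* FNM_PATHNAME keeps '*' from matching across '/' */
	int hit = fnmatch(pattern, path, FNM_PATHNAME) == 0;

	printf("%-18s %-42s %s\n", pattern, path, hit ? "match" : "no match");
}

int main(void)
{
	/* hypothetical two-level path: the old pattern already matches */
	try_pattern("*/*/vexpress*", "arch/arm/vexpress-foo.c");
	/* three levels deep: only the newly added, deeper pattern matches */
	try_pattern("*/*/vexpress*", "drivers/power/reset/vexpress-poweroff.c");
	try_pattern("*/*/*/vexpress*", "drivers/power/reset/vexpress-poweroff.c");
	return 0;
}

get_maintainer.pl simply reports the M:/L: contacts of every entry whose F: pattern matches a file touched by a patch, so adding the bindings directory here is enough to surface devicetree binding patches to the I2C maintainers.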
Signed-off-by: Wolfram Sang --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index ea4d0058fd1b..794a8f0a31f6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4561,6 +4561,7 @@ W: https://i2c.wiki.kernel.org/ Q: https://patchwork.ozlabs.org/project/linux-i2c/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git S: Maintained +F: Documentation/devicetree/bindings/i2c/ F: Documentation/i2c/ F: drivers/i2c/ F: include/linux/i2c.h -- cgit v1.2.3-59-g8ed1b From af6c9f1657ca6d2ef2b2c0e31ad17c6fbf773baf Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 19 Nov 2014 18:11:00 -0800 Subject: thermal: provide an UAPI header file include/linux/thermal.h contains definitions for the Thermal generic netlink family, but none of the valuable information relevant to user-space such as the Genl family name, multicast group, version or command set and data types is exported to user-space. Export all the relevant generic netlink information to user-space to make this genl family usable by user-space, and while at it, export THERMAL_NAME_LENGTH since it limits name length for thermal_hwmon devices. Kbuild and MAINTAINERS are also updated accordingly to reflect this new file: include/uapi/linux/thermal.h. Signed-off-by: Florian Fainelli Signed-off-by: Zhang Rui --- MAINTAINERS | 1 + include/linux/thermal.h | 31 +------------------------------ include/uapi/linux/Kbuild | 1 + include/uapi/linux/thermal.h | 35 +++++++++++++++++++++++++++++++++++ 4 files changed, 38 insertions(+), 30 deletions(-) create mode 100644 include/uapi/linux/thermal.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0ff630de8a6d..9c709f503c03 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9295,6 +9295,7 @@ Q: https://patchwork.kernel.org/project/linux-pm/list/ S: Supported F: drivers/thermal/ F: include/linux/thermal.h +F: include/uapi/linux/thermal.h F: include/linux/cpu_cooling.h F: Documentation/devicetree/bindings/thermal/ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index ef90838b36a0..70f87406bed9 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -29,10 +29,10 @@ #include #include #include +#include #define THERMAL_TRIPS_NONE -1 #define THERMAL_MAX_TRIPS 12 -#define THERMAL_NAME_LENGTH 20 /* invalid cooling state */ #define THERMAL_CSTATE_INVALID -1UL @@ -49,11 +49,6 @@ #define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) #define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732) -/* Adding event notification support elements */ -#define THERMAL_GENL_FAMILY_NAME "thermal_event" -#define THERMAL_GENL_VERSION 0x01 -#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" - /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) #define DEFAULT_THERMAL_GOVERNOR "step_wise" @@ -86,30 +81,6 @@ enum thermal_trend { THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ }; -/* Events supported by Thermal Netlink */ -enum events { - THERMAL_AUX0, - THERMAL_AUX1, - THERMAL_CRITICAL, - THERMAL_DEV_FAULT, -}; - -/* attributes of thermal_genl_family */ -enum { - THERMAL_GENL_ATTR_UNSPEC, - THERMAL_GENL_ATTR_EVENT, - __THERMAL_GENL_ATTR_MAX, -}; -#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) - -/* commands supported by the thermal_genl_family */ -enum { - THERMAL_GENL_CMD_UNSPEC, - THERMAL_GENL_CMD_EVENT, - __THERMAL_GENL_CMD_MAX, -}; -#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) - 
struct thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 4c94f31a8c99..a1943e2d1264 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -383,6 +383,7 @@ header-y += tcp.h header-y += tcp_metrics.h header-y += telephony.h header-y += termios.h +header-y += thermal.h header-y += time.h header-y += times.h header-y += timex.h diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h new file mode 100644 index 000000000000..ac5535855982 --- /dev/null +++ b/include/uapi/linux/thermal.h @@ -0,0 +1,35 @@ +#ifndef _UAPI_LINUX_THERMAL_H +#define _UAPI_LINUX_THERMAL_H + +#define THERMAL_NAME_LENGTH 20 + +/* Adding event notification support elements */ +#define THERMAL_GENL_FAMILY_NAME "thermal_event" +#define THERMAL_GENL_VERSION 0x01 +#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" + +/* Events supported by Thermal Netlink */ +enum events { + THERMAL_AUX0, + THERMAL_AUX1, + THERMAL_CRITICAL, + THERMAL_DEV_FAULT, +}; + +/* attributes of thermal_genl_family */ +enum { + THERMAL_GENL_ATTR_UNSPEC, + THERMAL_GENL_ATTR_EVENT, + __THERMAL_GENL_ATTR_MAX, +}; +#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) + +/* commands supported by the thermal_genl_family */ +enum { + THERMAL_GENL_CMD_UNSPEC, + THERMAL_GENL_CMD_EVENT, + __THERMAL_GENL_CMD_MAX, +}; +#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) + +#endif /* _UAPI_LINUX_THERMAL_H */ -- cgit v1.2.3-59-g8ed1b From 3d6969a641d01cc9b6aa199a6f01eb1802522baf Mon Sep 17 00:00:00 2001 From: Bryan Wu Date: Wed, 3 Dec 2014 11:14:00 -0800 Subject: MAINTAINERS: Remove my name from Backlight subsystem Signed-off-by: Bryan Wu Signed-off-by: Lee Jones --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 3c6427190be2..fc6c57b7a3ae 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1861,7 +1861,6 @@ F: drivers/net/wireless/b43legacy/ BACKLIGHT CLASS/SUBSYSTEM M: Jingoo Han -M: Bryan Wu M: Lee Jones S: Maintained F: drivers/video/backlight/ -- cgit v1.2.3-59-g8ed1b From 5d1ea48bdde67898e87d6d8f511fd097fa64c749 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:55 -0800 Subject: mm: page_cgroup: rename file to mm/swap_cgroup.c Now that the external page_cgroup data structure and its lookup is gone, the only code remaining in there is swap slot accounting. Rename it and move the conditional compilation into mm/Makefile. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Acked-by: David S. Miller Acked-by: KAMEZAWA Hiroyuki Cc: "Kirill A. 
Shutemov" Cc: Tejun Heo Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 2 +- include/linux/page_cgroup.h | 40 --------- include/linux/swap_cgroup.h | 42 +++++++++ mm/Makefile | 3 +- mm/memcontrol.c | 2 +- mm/page_cgroup.c | 211 -------------------------------------------- mm/swap_cgroup.c | 208 +++++++++++++++++++++++++++++++++++++++++++ mm/swap_state.c | 1 - mm/swapfile.c | 2 +- 9 files changed, 255 insertions(+), 256 deletions(-) delete mode 100644 include/linux/page_cgroup.h create mode 100644 include/linux/swap_cgroup.h delete mode 100644 mm/page_cgroup.c create mode 100644 mm/swap_cgroup.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0d6469a2cf70..0aedd3e1804b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2606,7 +2606,7 @@ L: cgroups@vger.kernel.org L: linux-mm@kvack.org S: Maintained F: mm/memcontrol.c -F: mm/page_cgroup.c +F: mm/swap_cgroup.c CORETEMP HARDWARE MONITORING DRIVER M: Fenghua Yu diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h deleted file mode 100644 index 65be35785c86..000000000000 --- a/include/linux/page_cgroup.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __LINUX_PAGE_CGROUP_H -#define __LINUX_PAGE_CGROUP_H - -#include - -#ifdef CONFIG_MEMCG_SWAP -extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new); -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); -extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); -extern int swap_cgroup_swapon(int type, unsigned long max_pages); -extern void swap_cgroup_swapoff(int type); -#else - -static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - return 0; -} - -static inline -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return 0; -} - -static inline int -swap_cgroup_swapon(int type, unsigned long max_pages) -{ - return 0; -} - -static inline void swap_cgroup_swapoff(int type) -{ - return; -} - -#endif /* CONFIG_MEMCG_SWAP */ - -#endif /* __LINUX_PAGE_CGROUP_H */ diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h new file mode 100644 index 000000000000..145306bdc92f --- /dev/null +++ b/include/linux/swap_cgroup.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_SWAP_CGROUP_H +#define __LINUX_SWAP_CGROUP_H + +#include + +#ifdef CONFIG_MEMCG_SWAP + +extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); +extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); + +#else + +static inline +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + return 0; +} + +static inline +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return 0; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif /* CONFIG_MEMCG_SWAP */ + +#endif /* __LINUX_SWAP_CGROUP_H */ diff --git a/mm/Makefile b/mm/Makefile index 6d9f40e922f7..b3c6ce932c64 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -56,7 +56,8 @@ obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o -obj-$(CONFIG_MEMCG) += memcontrol.o page_cgroup.o vmpressure.o +obj-$(CONFIG_MEMCG) 
+= memcontrol.o vmpressure.o +obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b864067791dc..ab270e34ba3e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -51,7 +51,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c deleted file mode 100644 index f0f31c1d4d0c..000000000000 --- a/mm/page_cgroup.c +++ /dev/null @@ -1,211 +0,0 @@ -#include -#include -#include -#include - -#ifdef CONFIG_MEMCG_SWAP - -static DEFINE_MUTEX(swap_cgroup_mutex); -struct swap_cgroup_ctrl { - struct page **map; - unsigned long length; - spinlock_t lock; -}; - -static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; - -struct swap_cgroup { - unsigned short id; -}; -#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) - -/* - * SwapCgroup implements "lookup" and "exchange" operations. - * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge - * against SwapCache. At swap_free(), this is accessed directly from swap. - * - * This means, - * - we have no race in "exchange" when we're accessed via SwapCache because - * SwapCache(and its swp_entry) is under lock. - * - When called via swap_free(), there is no user of this entry and no race. - * Then, we don't need lock around "exchange". - * - * TODO: we can push these buffers out to HIGHMEM. - */ - -/* - * allocate buffer for swap_cgroup. - */ -static int swap_cgroup_prepare(int type) -{ - struct page *page; - struct swap_cgroup_ctrl *ctrl; - unsigned long idx, max; - - ctrl = &swap_cgroup_ctrl[type]; - - for (idx = 0; idx < ctrl->length; idx++) { - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - goto not_enough_page; - ctrl->map[idx] = page; - } - return 0; -not_enough_page: - max = idx; - for (idx = 0; idx < max; idx++) - __free_page(ctrl->map[idx]); - - return -ENOMEM; -} - -static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, - struct swap_cgroup_ctrl **ctrlp) -{ - pgoff_t offset = swp_offset(ent); - struct swap_cgroup_ctrl *ctrl; - struct page *mappage; - struct swap_cgroup *sc; - - ctrl = &swap_cgroup_ctrl[swp_type(ent)]; - if (ctrlp) - *ctrlp = ctrl; - - mappage = ctrl->map[offset / SC_PER_PAGE]; - sc = page_address(mappage); - return sc + offset % SC_PER_PAGE; -} - -/** - * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. - * @ent: swap entry to be cmpxchged - * @old: old id - * @new: new id - * - * Returns old id at success, 0 at failure. - * (There is no mem_cgroup using 0 as its id) - */ -unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new) -{ - struct swap_cgroup_ctrl *ctrl; - struct swap_cgroup *sc; - unsigned long flags; - unsigned short retval; - - sc = lookup_swap_cgroup(ent, &ctrl); - - spin_lock_irqsave(&ctrl->lock, flags); - retval = sc->id; - if (retval == old) - sc->id = new; - else - retval = 0; - spin_unlock_irqrestore(&ctrl->lock, flags); - return retval; -} - -/** - * swap_cgroup_record - record mem_cgroup for this swp_entry. - * @ent: swap entry to be recorded into - * @id: mem_cgroup to be recorded - * - * Returns old value at success, 0 at failure. - * (Of course, old value can be 0.) 
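 * Each map page holds SC_PER_PAGE (PAGE_SIZE / sizeof(struct swap_cgroup))
 * entries, one entry per swap slot.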
- */ -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - struct swap_cgroup_ctrl *ctrl; - struct swap_cgroup *sc; - unsigned short old; - unsigned long flags; - - sc = lookup_swap_cgroup(ent, &ctrl); - - spin_lock_irqsave(&ctrl->lock, flags); - old = sc->id; - sc->id = id; - spin_unlock_irqrestore(&ctrl->lock, flags); - - return old; -} - -/** - * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry - * @ent: swap entry to be looked up. - * - * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID) - */ -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return lookup_swap_cgroup(ent, NULL)->id; -} - -int swap_cgroup_swapon(int type, unsigned long max_pages) -{ - void *array; - unsigned long array_size; - unsigned long length; - struct swap_cgroup_ctrl *ctrl; - - if (!do_swap_account) - return 0; - - length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); - array_size = length * sizeof(void *); - - array = vzalloc(array_size); - if (!array) - goto nomem; - - ctrl = &swap_cgroup_ctrl[type]; - mutex_lock(&swap_cgroup_mutex); - ctrl->length = length; - ctrl->map = array; - spin_lock_init(&ctrl->lock); - if (swap_cgroup_prepare(type)) { - /* memory shortage */ - ctrl->map = NULL; - ctrl->length = 0; - mutex_unlock(&swap_cgroup_mutex); - vfree(array); - goto nomem; - } - mutex_unlock(&swap_cgroup_mutex); - - return 0; -nomem: - printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); - printk(KERN_INFO - "swap_cgroup can be disabled by swapaccount=0 boot option\n"); - return -ENOMEM; -} - -void swap_cgroup_swapoff(int type) -{ - struct page **map; - unsigned long i, length; - struct swap_cgroup_ctrl *ctrl; - - if (!do_swap_account) - return; - - mutex_lock(&swap_cgroup_mutex); - ctrl = &swap_cgroup_ctrl[type]; - map = ctrl->map; - length = ctrl->length; - ctrl->map = NULL; - ctrl->length = 0; - mutex_unlock(&swap_cgroup_mutex); - - if (map) { - for (i = 0; i < length; i++) { - struct page *page = map[i]; - if (page) - __free_page(page); - } - vfree(map); - } -} - -#endif diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c new file mode 100644 index 000000000000..b5f7f24b8dd1 --- /dev/null +++ b/mm/swap_cgroup.c @@ -0,0 +1,208 @@ +#include +#include +#include + +#include /* depends on mm.h include */ + +static DEFINE_MUTEX(swap_cgroup_mutex); +struct swap_cgroup_ctrl { + struct page **map; + unsigned long length; + spinlock_t lock; +}; + +static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; + +struct swap_cgroup { + unsigned short id; +}; +#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) + +/* + * SwapCgroup implements "lookup" and "exchange" operations. + * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge + * against SwapCache. At swap_free(), this is accessed directly from swap. + * + * This means, + * - we have no race in "exchange" when we're accessed via SwapCache because + * SwapCache(and its swp_entry) is under lock. + * - When called via swap_free(), there is no user of this entry and no race. + * Then, we don't need lock around "exchange". + * + * TODO: we can push these buffers out to HIGHMEM. + */ + +/* + * allocate buffer for swap_cgroup. 
+ */ +static int swap_cgroup_prepare(int type) +{ + struct page *page; + struct swap_cgroup_ctrl *ctrl; + unsigned long idx, max; + + ctrl = &swap_cgroup_ctrl[type]; + + for (idx = 0; idx < ctrl->length; idx++) { + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto not_enough_page; + ctrl->map[idx] = page; + } + return 0; +not_enough_page: + max = idx; + for (idx = 0; idx < max; idx++) + __free_page(ctrl->map[idx]); + + return -ENOMEM; +} + +static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, + struct swap_cgroup_ctrl **ctrlp) +{ + pgoff_t offset = swp_offset(ent); + struct swap_cgroup_ctrl *ctrl; + struct page *mappage; + struct swap_cgroup *sc; + + ctrl = &swap_cgroup_ctrl[swp_type(ent)]; + if (ctrlp) + *ctrlp = ctrl; + + mappage = ctrl->map[offset / SC_PER_PAGE]; + sc = page_address(mappage); + return sc + offset % SC_PER_PAGE; +} + +/** + * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. + * @ent: swap entry to be cmpxchged + * @old: old id + * @new: new id + * + * Returns old id at success, 0 at failure. + * (There is no mem_cgroup using 0 as its id) + */ +unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new) +{ + struct swap_cgroup_ctrl *ctrl; + struct swap_cgroup *sc; + unsigned long flags; + unsigned short retval; + + sc = lookup_swap_cgroup(ent, &ctrl); + + spin_lock_irqsave(&ctrl->lock, flags); + retval = sc->id; + if (retval == old) + sc->id = new; + else + retval = 0; + spin_unlock_irqrestore(&ctrl->lock, flags); + return retval; +} + +/** + * swap_cgroup_record - record mem_cgroup for this swp_entry. + * @ent: swap entry to be recorded into + * @id: mem_cgroup to be recorded + * + * Returns old value at success, 0 at failure. + * (Of course, old value can be 0.) + */ +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + struct swap_cgroup_ctrl *ctrl; + struct swap_cgroup *sc; + unsigned short old; + unsigned long flags; + + sc = lookup_swap_cgroup(ent, &ctrl); + + spin_lock_irqsave(&ctrl->lock, flags); + old = sc->id; + sc->id = id; + spin_unlock_irqrestore(&ctrl->lock, flags); + + return old; +} + +/** + * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry + * @ent: swap entry to be looked up. + * + * Returns ID of mem_cgroup at success. 0 at failure. 
(0 is invalid ID) + */ +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return lookup_swap_cgroup(ent, NULL)->id; +} + +int swap_cgroup_swapon(int type, unsigned long max_pages) +{ + void *array; + unsigned long array_size; + unsigned long length; + struct swap_cgroup_ctrl *ctrl; + + if (!do_swap_account) + return 0; + + length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); + array_size = length * sizeof(void *); + + array = vzalloc(array_size); + if (!array) + goto nomem; + + ctrl = &swap_cgroup_ctrl[type]; + mutex_lock(&swap_cgroup_mutex); + ctrl->length = length; + ctrl->map = array; + spin_lock_init(&ctrl->lock); + if (swap_cgroup_prepare(type)) { + /* memory shortage */ + ctrl->map = NULL; + ctrl->length = 0; + mutex_unlock(&swap_cgroup_mutex); + vfree(array); + goto nomem; + } + mutex_unlock(&swap_cgroup_mutex); + + return 0; +nomem: + printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); + printk(KERN_INFO + "swap_cgroup can be disabled by swapaccount=0 boot option\n"); + return -ENOMEM; +} + +void swap_cgroup_swapoff(int type) +{ + struct page **map; + unsigned long i, length; + struct swap_cgroup_ctrl *ctrl; + + if (!do_swap_account) + return; + + mutex_lock(&swap_cgroup_mutex); + ctrl = &swap_cgroup_ctrl[type]; + map = ctrl->map; + length = ctrl->length; + ctrl->map = NULL; + ctrl->length = 0; + mutex_unlock(&swap_cgroup_mutex); + + if (map) { + for (i = 0; i < length; i++) { + struct page *page = map[i]; + if (page) + __free_page(page); + } + vfree(map); + } +} diff --git a/mm/swap_state.c b/mm/swap_state.c index 154444918685..9711342987a0 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -17,7 +17,6 @@ #include #include #include -#include #include diff --git a/mm/swapfile.c b/mm/swapfile.c index 8798b2e0ac59..63f55ccb9b26 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); -- cgit v1.2.3-59-g8ed1b From 7b212edf86e28f4d0198f2152b2f0553f51a59a5 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 10 Dec 2014 15:51:24 -0800 Subject: MAINTAINERS: update ivtv mailing lists as subscriber-only Mark these as subscriber-only mailing lists. Signed-off-by: Joe Perches Cc: Andy Walls Cc: Mauro Carvalho Chehab Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0aedd3e1804b..4b79094bb051 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2723,7 +2723,7 @@ F: drivers/net/wireless/cw1200/ CX18 VIDEO4LINUX DRIVER M: Andy Walls -L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers) +L: ivtv-devel@ivtvdriver.org (subscribers-only) L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org @@ -5209,7 +5209,7 @@ F: drivers/media/tuners/it913x* IVTV VIDEO4LINUX DRIVER M: Andy Walls -L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers) +L: ivtv-devel@ivtvdriver.org (subscribers-only) L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://www.ivtvdriver.org -- cgit v1.2.3-59-g8ed1b From b4b982979eb2e97d2cd746eebb99b5452041170d Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Fri, 12 Dec 2014 16:54:14 -0800 Subject: MAINTAINERS: update Xiubo's email address My current email address will be gone shortly, update my email to be a gmail one. 
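The swap_cgroup interface moved into mm/swap_cgroup.c above exports five calls: swap_cgroup_swapon() and swap_cgroup_swapoff() bracket a swapfile's lifetime, while swap_cgroup_record(), lookup_swap_cgroup_id() and swap_cgroup_cmpxchg() operate on individual swap entries. The sketch below only illustrates the intended call pattern (assuming CONFIG_MEMCG_SWAP=y, since the cmpxchg helper has no stub otherwise); the wrapper function and its arguments are hypothetical and not part of the patch.

#include <linux/printk.h>
#include <linux/swap.h>
#include <linux/swap_cgroup.h>

/*
 * Illustrative sketch only, not code from the patch above: shows how a
 * caller such as memcontrol.c strings the swap_cgroup API together.
 */
static void swap_cgroup_usage_sketch(int type, unsigned long max_pages,
				     swp_entry_t ent, unsigned short memcg_id)
{
	unsigned short prev;

	/* swapon time: size and allocate the per-swapfile id array. */
	if (swap_cgroup_swapon(type, max_pages))
		return;

	/* swap-out: stamp the entry with the owning memcg id (0 = none). */
	prev = swap_cgroup_record(ent, memcg_id);

	/* swap-in charge: read the recorded owner back. */
	if (lookup_swap_cgroup_id(ent) != memcg_id)
		pr_debug("entry re-owned since record (prev=%u)\n", prev);

	/*
	 * Clear (or transfer) ownership only if we still hold it;
	 * returns memcg_id on success and 0 if the compare failed.
	 */
	swap_cgroup_cmpxchg(ent, memcg_id, 0);

	/* swapoff time: release the id array again. */
	swap_cgroup_swapoff(type);
}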
Signed-off-by: Xiubo Li Cc: Timur Tabi Cc: Takashi Iwai Acked-by: Nicolin Chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index e186bf90ed8a..68764f6d3898 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4037,7 +4037,7 @@ F: drivers/tty/serial/ucc_uart.c FREESCALE SOC SOUND DRIVERS M: Timur Tabi M: Nicolin Chen -M: Xiubo Li +M: Xiubo Li L: alsa-devel@alsa-project.org (moderated for non-subscribers) L: linuxppc-dev@lists.ozlabs.org S: Maintained -- cgit v1.2.3-59-g8ed1b From f0905c5a32ce6e9b743b4d9c70e53d1ce447852d Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 12 Dec 2014 16:25:47 -0800 Subject: MAINTAINERS: Add me as x86 VDSO submaintainer Here goes... :) Signed-off-by: Andy Lutomirski Acked-by: Thomas Gleixner Link: http://lkml.kernel.org/r/1042001e502f8e0deb0edfeeac209b68378650cf.1418430292.git.luto@amacapital.net Signed-off-by: Ingo Molnar --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c444907ccd69..7a54fa88018a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10295,6 +10295,13 @@ L: linux-edac@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/mcheck/* +X86 VDSO +M: Andy Lutomirski +L: linux-kernel@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso +S: Maintained +F: arch/x86/vdso/ + XC2028/3028 TUNER DRIVER M: Mauro Carvalho Chehab L: linux-media@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 7c1ac18dc02c105a199167ecc495944bd0e14d5a Mon Sep 17 00:00:00 2001 From: Kristen Carlson Accardi Date: Mon, 15 Dec 2014 15:58:24 -0800 Subject: MAINTAINERS: add entry for intel_pstate Add entry for intel_pstate. Signed-off-by: Kristen Carlson Accardi Signed-off-by: Rafael J. Wysocki --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0ff630de8a6d..888d8bd8b5b7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4837,6 +4837,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git S: Supported F: drivers/idle/intel_idle.c +INTEL PSTATE DRIVER +M: Kristen Carlson Accardi +L: linux-pm@vger.kernel.org +S: Supported +F: drivers/cpufreq/intel_pstate.c + INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) M: Maik Broemme L: linux-fbdev@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 77911fd267cd8b564f03f025eb128b115489bbe5 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 10 Dec 2014 04:22:37 -0300 Subject: [media] MAINTAINERS: vivi -> vivid The vivi driver no longer exists and is replaced by the vivid driver. Update MAINTAINERS accordingly. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9c49eb680020..6dc7f505b13c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10032,13 +10032,13 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/via/via-velocity.* -VIVI VIRTUAL VIDEO DRIVER +VIVID VIRTUAL VIDEO DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org S: Maintained -F: drivers/media/platform/vivi* +F: drivers/media/platform/vivid/* VLAN (802.1Q) M: Patrick McHardy -- cgit v1.2.3-59-g8ed1b From 0e324cf640fbd517615071c336fd4148fb5ce578 Mon Sep 17 00:00:00 2001 From: "John W. 
Linville" Date: Wed, 17 Dec 2014 12:07:05 -0500 Subject: MAINTAINERS: changes for wireless http://marc.info/?l=linux-wireless&m=141883202530292&w=2 This makes it official... :-) Signed-off-by: John W. Linville Signed-off-by: David S. Miller --- MAINTAINERS | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index fdffe962a16a..e82d31aeb936 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6603,19 +6603,8 @@ L: netdev@vger.kernel.org S: Maintained NETWORKING [WIRELESS] -M: "John W. Linville" L: linux-wireless@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-wireless/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git -S: Maintained -F: net/mac80211/ -F: net/rfkill/ -F: net/wireless/ -F: include/net/ieee80211* -F: include/linux/wireless.h -F: include/uapi/linux/wireless.h -F: include/net/iw_handler.h -F: drivers/net/wireless/ NETWORKING DRIVERS L: netdev@vger.kernel.org @@ -6636,6 +6625,14 @@ F: include/linux/inetdevice.h F: include/uapi/linux/if_* F: include/uapi/linux/netdevice.h +NETWORKING DRIVERS (WIRELESS) +M: Kalle Valo +L: linux-wireless@vger.kernel.org +Q: http://patchwork.kernel.org/project/linux-wireless/list/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git/ +S: Maintained +F: drivers/net/wireless/ + NETXEN (1/10) GbE SUPPORT M: Manish Chopra M: Sony Chacko -- cgit v1.2.3-59-g8ed1b From af6a5af8e8cc1566fc06636de02347825808650e Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 18 Dec 2014 09:24:50 -0800 Subject: Input: add new sun4i-lradc-keys driver Allwinnner sunxi SoCs have a low resolution adc (called lradc) which is specifically designed to have various (tablet) keys (ie home, back, search, etc). attached to it using a resistor network. This adds a driver for this. There are 2 channels, currently this driver only supports chan0 since there are no boards known to use chan1. This has been tested on an olimex a10s-olinuxino-micro, a13-olinuxino, and a20-olinuxino-micro. Signed-off-by: Hans de Goede Signed-off-by: Dmitry Torokhov --- .../devicetree/bindings/input/sun4i-lradc-keys.txt | 62 +++++ MAINTAINERS | 7 + drivers/input/keyboard/Kconfig | 10 + drivers/input/keyboard/Makefile | 1 + drivers/input/keyboard/sun4i-lradc-keys.c | 286 +++++++++++++++++++++ 5 files changed, 366 insertions(+) create mode 100644 Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt create mode 100644 drivers/input/keyboard/sun4i-lradc-keys.c (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt new file mode 100644 index 000000000000..b9c32f6fd687 --- /dev/null +++ b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt @@ -0,0 +1,62 @@ +Allwinner sun4i low res adc attached tablet keys +------------------------------------------------ + +Required properties: + - compatible: "allwinner,sun4i-a10-lradc-keys" + - reg: mmio address range of the chip + - interrupts: interrupt to which the chip is connected + - vref-supply: powersupply for the lradc reference voltage + +Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys": + +Required subnode-properties: + - label: Descriptive name of the key. + - linux,code: Keycode to emit. + - channel: Channel this key is attached to, mut be 0 or 1. + - voltage: Voltage in µV at lradc input when this key is pressed. 
+ +Example: + +#include + + lradc: lradc@01c22800 { + compatible = "allwinner,sun4i-a10-lradc-keys"; + reg = <0x01c22800 0x100>; + interrupts = <31>; + vref-supply = <®_vcc3v0>; + + button@191 { + label = "Volume Up"; + linux,code = ; + channel = <0>; + voltage = <191274>; + }; + + button@392 { + label = "Volume Down"; + linux,code = ; + channel = <0>; + voltage = <392644>; + }; + + button@601 { + label = "Menu"; + linux,code = ; + channel = <0>; + voltage = <601151>; + }; + + button@795 { + label = "Enter"; + linux,code = ; + channel = <0>; + voltage = <795090>; + }; + + button@987 { + label = "Home"; + linux,code = ; + channel = <0>; + voltage = <987387>; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index f73bb4179832..21b834b191d0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8806,6 +8806,13 @@ F: arch/m68k/sun3*/ F: arch/m68k/include/asm/sun3* F: drivers/net/ethernet/i825xx/sun3* +SUN4I LOW RES ADC ATTACHED TABLET KEYS DRIVER +M: Hans de Goede +L: linux-input@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt +F: drivers/input/keyboard/sun4i-lradc-keys.c + SUNDANCE NETWORK DRIVER M: Denis Kirjanov L: netdev@vger.kernel.org diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index a5d9b3f3c871..a89ba7cb96f1 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -568,6 +568,16 @@ config KEYBOARD_STMPE To compile this driver as a module, choose M here: the module will be called stmpe-keypad. +config KEYBOARD_SUN4I_LRADC + tristate "Allwinner sun4i low res adc attached tablet keys support" + depends on ARCH_SUNXI + help + This selects support for the Allwinner low res adc attached tablet + keys found on Allwinner sunxi SoCs. + + To compile this driver as a module, choose M here: the + module will be called sun4i-lradc-keys. + config KEYBOARD_DAVINCI tristate "TI DaVinci Key Scan" depends on ARCH_DAVINCI_DM365 diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index febafa527eb6..470767884bd8 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_KEYBOARD_SPEAR) += spear-keyboard.o obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o obj-$(CONFIG_KEYBOARD_ST_KEYSCAN) += st-keyscan.o +obj-$(CONFIG_KEYBOARD_SUN4I_LRADC) += sun4i-lradc-keys.o obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o obj-$(CONFIG_KEYBOARD_TC3589X) += tc3589x-keypad.o obj-$(CONFIG_KEYBOARD_TEGRA) += tegra-kbc.o diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c new file mode 100644 index 000000000000..cc8f7ddcee53 --- /dev/null +++ b/drivers/input/keyboard/sun4i-lradc-keys.c @@ -0,0 +1,286 @@ +/* + * Allwinner sun4i low res adc attached tablet keys driver + * + * Copyright (C) 2014 Hans de Goede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Allwinnner sunxi SoCs have a lradc which is specifically designed to have + * various (tablet) keys (ie home, back, search, etc). 
attached to it using + * a resistor network. This driver is for the keys on such boards. + * + * There are 2 channels, currently this driver only supports channel 0 since + * there are no boards known to use channel 1. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LRADC_CTRL 0x00 +#define LRADC_INTC 0x04 +#define LRADC_INTS 0x08 +#define LRADC_DATA0 0x0c +#define LRADC_DATA1 0x10 + +/* LRADC_CTRL bits */ +#define FIRST_CONVERT_DLY(x) ((x) << 24) /* 8 bits */ +#define CHAN_SELECT(x) ((x) << 22) /* 2 bits */ +#define CONTINUE_TIME_SEL(x) ((x) << 16) /* 4 bits */ +#define KEY_MODE_SEL(x) ((x) << 12) /* 2 bits */ +#define LEVELA_B_CNT(x) ((x) << 8) /* 4 bits */ +#define HOLD_EN(x) ((x) << 6) +#define LEVELB_VOL(x) ((x) << 4) /* 2 bits */ +#define SAMPLE_RATE(x) ((x) << 2) /* 2 bits */ +#define ENABLE(x) ((x) << 0) + +/* LRADC_INTC and LRADC_INTS bits */ +#define CHAN1_KEYUP_IRQ BIT(12) +#define CHAN1_ALRDY_HOLD_IRQ BIT(11) +#define CHAN1_HOLD_IRQ BIT(10) +#define CHAN1_KEYDOWN_IRQ BIT(9) +#define CHAN1_DATA_IRQ BIT(8) +#define CHAN0_KEYUP_IRQ BIT(4) +#define CHAN0_ALRDY_HOLD_IRQ BIT(3) +#define CHAN0_HOLD_IRQ BIT(2) +#define CHAN0_KEYDOWN_IRQ BIT(1) +#define CHAN0_DATA_IRQ BIT(0) + +struct sun4i_lradc_keymap { + u32 voltage; + u32 keycode; +}; + +struct sun4i_lradc_data { + struct device *dev; + struct input_dev *input; + void __iomem *base; + struct regulator *vref_supply; + struct sun4i_lradc_keymap *chan0_map; + u32 chan0_map_count; + u32 chan0_keycode; + u32 vref; +}; + +static irqreturn_t sun4i_lradc_irq(int irq, void *dev_id) +{ + struct sun4i_lradc_data *lradc = dev_id; + u32 i, ints, val, voltage, diff, keycode = 0, closest = 0xffffffff; + + ints = readl(lradc->base + LRADC_INTS); + + /* + * lradc supports only one keypress at a time, release does not give + * any info as to which key was released, so we cache the keycode. + */ + + if (ints & CHAN0_KEYUP_IRQ) { + input_report_key(lradc->input, lradc->chan0_keycode, 0); + lradc->chan0_keycode = 0; + } + + if ((ints & CHAN0_KEYDOWN_IRQ) && lradc->chan0_keycode == 0) { + val = readl(lradc->base + LRADC_DATA0) & 0x3f; + voltage = val * lradc->vref / 63; + + for (i = 0; i < lradc->chan0_map_count; i++) { + diff = abs(lradc->chan0_map[i].voltage - voltage); + if (diff < closest) { + closest = diff; + keycode = lradc->chan0_map[i].keycode; + } + } + + lradc->chan0_keycode = keycode; + input_report_key(lradc->input, lradc->chan0_keycode, 1); + } + + input_sync(lradc->input); + + writel(ints, lradc->base + LRADC_INTS); + + return IRQ_HANDLED; +} + +static int sun4i_lradc_open(struct input_dev *dev) +{ + struct sun4i_lradc_data *lradc = input_get_drvdata(dev); + int error; + + error = regulator_enable(lradc->vref_supply); + if (error) + return error; + + /* lradc Vref internally is divided by 2/3 */ + lradc->vref = regulator_get_voltage(lradc->vref_supply) * 2 / 3; + + /* + * Set sample time to 4 ms / 250 Hz. 
Wait 2 * 4 ms for key to + * stabilize on press, wait (1 + 1) * 4 ms for key release + */ + writel(FIRST_CONVERT_DLY(2) | LEVELA_B_CNT(1) | HOLD_EN(1) | + SAMPLE_RATE(0) | ENABLE(1), lradc->base + LRADC_CTRL); + + writel(CHAN0_KEYUP_IRQ | CHAN0_KEYDOWN_IRQ, lradc->base + LRADC_INTC); + + return 0; +} + +static void sun4i_lradc_close(struct input_dev *dev) +{ + struct sun4i_lradc_data *lradc = input_get_drvdata(dev); + + /* Disable lradc, leave other settings unchanged */ + writel(FIRST_CONVERT_DLY(2) | LEVELA_B_CNT(1) | HOLD_EN(1) | + SAMPLE_RATE(2), lradc->base + LRADC_CTRL); + writel(0, lradc->base + LRADC_INTC); + + regulator_disable(lradc->vref_supply); +} + +static int sun4i_lradc_load_dt_keymap(struct device *dev, + struct sun4i_lradc_data *lradc) +{ + struct device_node *np, *pp; + int i; + int error; + + np = dev->of_node; + if (!np) + return -EINVAL; + + lradc->chan0_map_count = of_get_child_count(np); + if (lradc->chan0_map_count == 0) { + dev_err(dev, "keymap is missing in device tree\n"); + return -EINVAL; + } + + lradc->chan0_map = devm_kmalloc_array(dev, lradc->chan0_map_count, + sizeof(struct sun4i_lradc_keymap), + GFP_KERNEL); + if (!lradc->chan0_map) + return -ENOMEM; + + i = 0; + for_each_child_of_node(np, pp) { + struct sun4i_lradc_keymap *map = &lradc->chan0_map[i]; + u32 channel; + + error = of_property_read_u32(pp, "channel", &channel); + if (error || channel != 0) { + dev_err(dev, "%s: Inval channel prop\n", pp->name); + return -EINVAL; + } + + error = of_property_read_u32(pp, "voltage", &map->voltage); + if (error) { + dev_err(dev, "%s: Inval voltage prop\n", pp->name); + return -EINVAL; + } + + error = of_property_read_u32(pp, "linux,code", &map->keycode); + if (error) { + dev_err(dev, "%s: Inval linux,code prop\n", pp->name); + return -EINVAL; + } + + i++; + } + + return 0; +} + +static int sun4i_lradc_probe(struct platform_device *pdev) +{ + struct sun4i_lradc_data *lradc; + struct device *dev = &pdev->dev; + int i; + int error; + + lradc = devm_kzalloc(dev, sizeof(struct sun4i_lradc_data), GFP_KERNEL); + if (!lradc) + return -ENOMEM; + + error = sun4i_lradc_load_dt_keymap(dev, lradc); + if (error) + return error; + + lradc->vref_supply = devm_regulator_get(dev, "vref"); + if (IS_ERR(lradc->vref_supply)) + return PTR_ERR(lradc->vref_supply); + + lradc->dev = dev; + lradc->input = devm_input_allocate_device(dev); + if (!lradc->input) + return -ENOMEM; + + lradc->input->name = pdev->name; + lradc->input->phys = "sun4i_lradc/input0"; + lradc->input->open = sun4i_lradc_open; + lradc->input->close = sun4i_lradc_close; + lradc->input->id.bustype = BUS_HOST; + lradc->input->id.vendor = 0x0001; + lradc->input->id.product = 0x0001; + lradc->input->id.version = 0x0100; + + __set_bit(EV_KEY, lradc->input->evbit); + for (i = 0; i < lradc->chan0_map_count; i++) + __set_bit(lradc->chan0_map[i].keycode, lradc->input->keybit); + + input_set_drvdata(lradc->input, lradc); + + lradc->base = devm_ioremap_resource(dev, + platform_get_resource(pdev, IORESOURCE_MEM, 0)); + if (IS_ERR(lradc->base)) + return PTR_ERR(lradc->base); + + error = devm_request_irq(dev, platform_get_irq(pdev, 0), + sun4i_lradc_irq, 0, + "sun4i-a10-lradc-keys", lradc); + if (error) + return error; + + error = input_register_device(lradc->input); + if (error) + return error; + + platform_set_drvdata(pdev, lradc); + return 0; +} + +static const struct of_device_id sun4i_lradc_of_match[] = { + { .compatible = "allwinner,sun4i-a10-lradc-keys", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, 
sun4i_lradc_of_match); + +static struct platform_driver sun4i_lradc_driver = { + .driver = { + .name = "sun4i-a10-lradc-keys", + .of_match_table = of_match_ptr(sun4i_lradc_of_match), + }, + .probe = sun4i_lradc_probe, +}; + +module_platform_driver(sun4i_lradc_driver); + +MODULE_DESCRIPTION("Allwinner sun4i low res adc attached tablet keys driver"); +MODULE_AUTHOR("Hans de Goede "); +MODULE_LICENSE("GPL"); -- cgit v1.2.3-59-g8ed1b From 4e0c4a47d723c6bfdf24323cf539bd08edff7d31 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 30 Dec 2014 09:00:02 -0800 Subject: Btrfs: add more maintainers I'm lucky to have a huge amount of help on Btrfs, and want to thank everyone that sends patches, does review and helps track down bugs. Dave Sterba is a long time reviewer and contributor, and adding him to the maintainers file reflects the excellent work he has been doing for years. Signed-off-by: Chris Mason --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 0ff630de8a6d..189c7c598f10 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2189,6 +2189,7 @@ F: drivers/gpio/gpio-bt8xx.c BTRFS FILE SYSTEM M: Chris Mason M: Josef Bacik +M: David Sterba L: linux-btrfs@vger.kernel.org W: http://btrfs.wiki.kernel.org/ Q: http://patchwork.kernel.org/project/linux-btrfs/list/ -- cgit v1.2.3-59-g8ed1b From 75dd112aac25713cd686cb4bfa78cf907519c504 Mon Sep 17 00:00:00 2001 From: Henrik Rydberg Date: Thu, 8 Jan 2015 14:32:21 -0800 Subject: MAINTAINERS: update rydberg's addresses My ISP finally gave up on the old mail address, so I am moving things over to bitmath.org instead. Also change the status fields to better reflect reality. Signed-off-by: Henrik Rydberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- .mailmap | 1 + MAINTAINERS | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'MAINTAINERS') diff --git a/.mailmap b/.mailmap index ada8ad696b2e..d357e1bd2a43 100644 --- a/.mailmap +++ b/.mailmap @@ -51,6 +51,7 @@ Greg Kroah-Hartman Greg Kroah-Hartman Henk Vergonet Henrik Kretzschmar +Henrik Rydberg Herbert Xu Jacob Shin James Bottomley diff --git a/MAINTAINERS b/MAINTAINERS index ddb9ac8d32b3..79b2e4ba78ee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -724,15 +724,15 @@ F: include/uapi/linux/apm_bios.h F: drivers/char/apm-emulation.c APPLE BCM5974 MULTITOUCH DRIVER -M: Henrik Rydberg +M: Henrik Rydberg L: linux-input@vger.kernel.org -S: Maintained +S: Odd fixes F: drivers/input/mouse/bcm5974.c APPLE SMC DRIVER -M: Henrik Rydberg +M: Henrik Rydberg L: lm-sensors@lm-sensors.org -S: Maintained +S: Odd fixes F: drivers/hwmon/applesmc.c APPLETALK NETWORK LAYER @@ -4940,10 +4940,10 @@ F: include/uapi/linux/input.h F: include/linux/input/ INPUT MULTITOUCH (MT) PROTOCOL -M: Henrik Rydberg +M: Henrik Rydberg L: linux-input@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git -S: Maintained +S: Odd fixes F: Documentation/input/multi-touch-protocol.txt F: drivers/input/input-mt.c K: \b(ABS|SYN)_MT_ -- cgit v1.2.3-59-g8ed1b From f17effbe891624fe1ec2dcfa68625ed1c98ebe3a Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sat, 10 Jan 2015 14:10:59 -0800 Subject: MAINTAINERS: add info for e3x0-button driver Signed-off-by: Moritz Fischer Signed-off-by: Dmitry Torokhov --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 21b834b191d0..d7269172981d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3277,6 
+3277,14 @@ M: "Maciej W. Rozycki" S: Maintained F: drivers/tty/serial/dz.* +E3X0 POWER BUTTON DRIVER +M: Moritz Fischer +L: usrp-users@lists.ettus.com +W: http://www.ettus.com +S: Supported +F: drivers/input/misc/e3x0-button.c +F: Documentation/devicetree/bindings/input/e3x0-button.txt + E4000 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org -- cgit v1.2.3-59-g8ed1b
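For the sun4i-lradc-keys driver added above, key lookup is pure arithmetic: the 6-bit sample is scaled against a reference of two thirds of the vref supply, and the keymap entry with the smallest voltage distance wins. The stand-alone program below re-runs that calculation with the voltages from the DT binding example; the 3.0 V supply value and the raw sample of 13 are made-up inputs for illustration, not values from the patch.

#include <stdio.h>
#include <stdlib.h>

/* Voltages (in microvolts) copied from the DT binding example above. */
static const struct { long voltage_uV; const char *label; } keymap[] = {
	{ 191274, "Volume Up"   },
	{ 392644, "Volume Down" },
	{ 601151, "Menu"        },
	{ 795090, "Enter"       },
	{ 987387, "Home"        },
};

int main(void)
{
	long supply_uV = 3000000;		/* assumed 3.0 V vref-supply */
	long vref = supply_uV * 2 / 3;		/* lradc divides Vref internally */
	long sample = 13;			/* made-up raw 6-bit reading, 0..63 */
	long voltage = sample * vref / 63;	/* same scaling as the driver */
	long best_diff = -1;
	size_t best = 0;

	for (size_t i = 0; i < sizeof(keymap) / sizeof(keymap[0]); i++) {
		long diff = labs(keymap[i].voltage_uV - voltage);
		if (best_diff < 0 || diff < best_diff) {
			best_diff = diff;
			best = i;
		}
	}

	printf("sample %ld -> %ld uV -> \"%s\"\n",
	       sample, voltage, keymap[best].label);
	return 0;
}

With those inputs the sample scales to 412698 µV, so the closest table entry is the 392644 µV "Volume Down" key, which is what the driver's chan0 interrupt handler would report for the same reading.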