Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (30 commits)
perf: Add back list_head data types
perf ui hist browser: Fixup key bindings
perf ui browser: Add ui_browser__show counterpart: __hide
perf annotate: Cycle thru sorted lines with samples
perf ui: Make SPACE work as PGDN in all browsers
perf annotate: Sort by hottest lines in the TUI
perf ui: Complete the breakdown of util/newt.c
perf ui: Move hists browser to util/ui/browsers/
perf symbols: Ignore mapping symbols on ARM
perf ui: Move map browser to util/ui/browsers/
perf ui: Move annotate browser to util/ui/browsers/
perf ui: Move ui_progress routines to separate file in util/ui/
perf ui: Move ui_helpline routines to separate file in util/ui/
perf ui: Shorten ui_browser member names
perf, x86: P4 PMU -- update nmi irq statistics and unmask lvt entry properly
perf ui: Start breaking down newt.c into multiple files
perf tui: Introduce list_head based generic ui_browser refresh routine
perf probe: Fix memory leaks in add_perf_probe_events
perf probe: Fix to copy the type for raw parameters
perf report: Speed up exit path
...
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 625c3f0..8ca8283 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -37,6 +37,13 @@
#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C visible symbols.
This seems to be the official gcc blessed way to do such arithmetic. */
+/*
+ * We need __phys_reloc_hide() here because gcc may assume that there is no
+ * overflow during __pa() calculation and can optimize it unexpectedly.
+ * Newer versions of gcc provide -fno-strict-overflow switch to handle this
+ * case properly. Once all supported versions of gcc understand it, we can
+ * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
+ */
#define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
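
The trick __phys_reloc_hide() relies on deserves a sketch: passing a value through an empty asm statement yields the same value, but gcc can no longer track its origin, so the no-overflow assumption cannot be applied to arithmetic done on the result. A minimal stand-alone illustration (a hypothetical helper, not the kernel's exact RELOC_HIDE macro):

    /*
     * reloc_hide: identity function that is opaque to the optimizer.
     * The empty asm forces v through a register whose contents gcc
     * knows nothing about, so later +/- arithmetic on it cannot be
     * "optimized" under the assumption that it never overflows.
     */
    static inline unsigned long reloc_hide(unsigned long x)
    {
            unsigned long v = x;

            asm("" : "=r" (v) : "0" (v));
            return v;
    }
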
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index aa558ac..42d412f 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -34,6 +34,7 @@
*/
#define UV_ITEMS_PER_DESCRIPTOR 8
+/* the 'throttle' to prevent the hardware stay-busy bug */
#define MAX_BAU_CONCURRENT 3
#define UV_CPUS_PER_ACT_STATUS 32
#define UV_ACT_STATUS_MASK 0x3
@@ -45,10 +46,26 @@
#define UV_DESC_BASE_PNODE_SHIFT 49
#define UV_PAYLOADQ_PNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
+#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
+#define UV_BAU_TUNABLES_DIR "sgi_uv"
+#define UV_BAU_TUNABLES_FILE "bau_tunables"
+#define WHITESPACE " \t\n"
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x0000000009UL
+/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
+#define BAU_MISC_CONTROL_MULT_MASK 3
+
+#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
+/* [30:28] URGENCY_7 an index into a table of times */
+#define BAU_URGENCY_7_SHIFT 28
+#define BAU_URGENCY_7_MASK 7
+
+#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
+/* [45:40] BAU - BAU transaction timeout select - a multiplier */
+#define BAU_TRANS_SHIFT 40
+#define BAU_TRANS_MASK 0x3f
/*
* bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
@@ -59,24 +76,21 @@
#define DESC_STATUS_SOURCE_TIMEOUT 3
/*
- * source side threshholds at which message retries print a warning
+ * delay for 'plugged' timeout retries, in microseconds
*/
-#define SOURCE_TIMEOUT_LIMIT 20
-#define DESTINATION_TIMEOUT_LIMIT 20
-
-/*
- * misc. delays, in microseconds
- */
-#define THROTTLE_DELAY 10
-#define TIMEOUT_DELAY 10
-#define BIOS_TO 1000
-/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */
+#define PLUGGED_DELAY 10
/*
 * thresholds at which to use IPI to free resources
*/
+/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
#define PLUGSB4RESET 100
-#define TIMEOUTSB4RESET 100
+/* after this many consecutive timeouts, use IPI to release resources */
+#define TIMEOUTSB4RESET 1
+/* after this many uses of IPI to release resources, give up the request */
+#define IPI_RESET_LIMIT 1
+/* after this # consecutive successes, bump up the throttle if it was lowered */
+#define COMPLETE_THRESHOLD 5
/*
* number of entries in the destination side payload queue
@@ -96,6 +110,13 @@
#define FLUSH_COMPLETE 4
/*
+ * tuning the action when the numalink network is extremely delayed
+ */
+#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in microseconds */
+#define CONGESTED_REPS 10 /* long delays averaged over this many broadcasts */
+#define CONGESTED_PERIOD 30 /* time for the bau to be disabled, in seconds */
+
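
Taken together, these three values define the congestion response that the tlb_uv.c changes later in this patch implement; a sketch of the per-broadcast test, with identifiers as in the patch (congested_cycles is congested_response_us pre-converted to cycles):

    /*
     * After each fully-measured broadcast: if this one took 'long'
     * and we have averaged over more than congested_reps broadcasts,
     * consider disabling the BAU for congested_period seconds.
     */
    if ((elapsed > congested_cycles) &&
        (bcp->period_requests > bcp->congested_reps))
            disable_for_congestion(bcp, stat);

With the defaults above, a cpu whose measured broadcasts average more than 1000 microseconds across a window of more than 10 requests turns the BAU off on all cpus for 30 seconds.
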
+/*
* Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
* If the 'multilevel' flag in the header portion of the descriptor
* has been set to 0, then endpoint multi-unicast mode is selected.
@@ -300,37 +321,16 @@
/* bytes 24-31 */
};
-/*
- * one per-cpu; to locate the software tables
- */
-struct bau_control {
- struct bau_desc *descriptor_base;
+struct msg_desc {
+ struct bau_payload_queue_entry *msg;
+ int msg_slot;
+ int sw_ack_slot;
struct bau_payload_queue_entry *va_queue_first;
struct bau_payload_queue_entry *va_queue_last;
- struct bau_payload_queue_entry *bau_msg_head;
- struct bau_control *uvhub_master;
- struct bau_control *socket_master;
- unsigned long timeout_interval;
- atomic_t active_descriptor_count;
- int max_concurrent;
- int max_concurrent_constant;
- int retry_message_scans;
- int plugged_tries;
- int timeout_tries;
- int ipi_attempts;
- int conseccompletes;
- short cpu;
- short uvhub_cpu;
- short uvhub;
- short cpus_in_socket;
- short cpus_in_uvhub;
- unsigned short message_number;
- unsigned short uvhub_quiesce;
- short socket_acknowledge_count[DEST_Q_SIZE];
- cycles_t send_message;
- spinlock_t masks_lock;
- spinlock_t uvhub_lock;
- spinlock_t queue_lock;
+};
+
+struct reset_args {
+ int sender;
};
/*
@@ -344,18 +344,25 @@
unsigned long s_dtimeout; /* destination side timeouts */
unsigned long s_time; /* time spent in sending side */
unsigned long s_retriesok; /* successful retries */
- unsigned long s_ntargcpu; /* number of cpus targeted */
- unsigned long s_ntarguvhub; /* number of uvhubs targeted */
- unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */
- unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */
- unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */
- unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */
- unsigned long s_ntarguvhub1; /* number of times == 1 target hub */
+ unsigned long s_ntargcpu; /* total number of cpu's targeted */
+ unsigned long s_ntargself; /* times the sending cpu was targeted */
+ unsigned long s_ntarglocals; /* targets of cpus on the local blade */
+ unsigned long s_ntargremotes; /* targets of cpus on remote blades */
+ unsigned long s_ntarglocaluvhub; /* targets of the local hub */
+ unsigned long s_ntargremoteuvhub; /* remote hubs targeted */
+ unsigned long s_ntarguvhub; /* total number of uvhubs targeted */
+ unsigned long s_ntarguvhub16; /* number of times target hubs >= 16*/
+ unsigned long s_ntarguvhub8; /* number of times target hubs >= 8 */
+ unsigned long s_ntarguvhub4; /* number of times target hubs >= 4 */
+ unsigned long s_ntarguvhub2; /* number of times target hubs >= 2 */
+ unsigned long s_ntarguvhub1; /* number of times target hubs == 1 */
unsigned long s_resets_plug; /* ipi-style resets from plug state */
unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
unsigned long s_busy; /* status stayed busy past s/w timer */
unsigned long s_throttles; /* waits in throttle */
unsigned long s_retry_messages; /* retry broadcasts */
+ unsigned long s_bau_reenabled; /* for bau enable/disable */
+ unsigned long s_bau_disabled; /* for bau enable/disable */
/* destination statistics */
unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
@@ -370,6 +377,52 @@
unsigned long d_rcanceled; /* number of messages canceled by resets */
};
+/*
+ * one per-cpu; to locate the software tables
+ */
+struct bau_control {
+ struct bau_desc *descriptor_base;
+ struct bau_payload_queue_entry *va_queue_first;
+ struct bau_payload_queue_entry *va_queue_last;
+ struct bau_payload_queue_entry *bau_msg_head;
+ struct bau_control *uvhub_master;
+ struct bau_control *socket_master;
+ struct ptc_stats *statp;
+ unsigned long timeout_interval;
+ unsigned long set_bau_on_time;
+ atomic_t active_descriptor_count;
+ int plugged_tries;
+ int timeout_tries;
+ int ipi_attempts;
+ int conseccompletes;
+ int baudisabled;
+ int set_bau_off;
+ short cpu;
+ short uvhub_cpu;
+ short uvhub;
+ short cpus_in_socket;
+ short cpus_in_uvhub;
+ unsigned short message_number;
+ unsigned short uvhub_quiesce;
+ short socket_acknowledge_count[DEST_Q_SIZE];
+ cycles_t send_message;
+ spinlock_t uvhub_lock;
+ spinlock_t queue_lock;
+ /* tunables */
+ int max_bau_concurrent;
+ int max_bau_concurrent_constant;
+ int plugged_delay;
+ int plugsb4reset;
+ int timeoutsb4reset;
+ int ipi_reset_limit;
+ int complete_threshold;
+ int congested_response_us;
+ int congested_reps;
+ int congested_period;
+ cycles_t period_time;
+ long period_requests;
+};
+
static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
{
return constant_test_bit(uvhub, &dstp->bits[0]);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 980508c..e3b534c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1606,7 +1606,7 @@
* acpi lapic path already maps that address in
* acpi_register_lapic_address()
*/
- if (!acpi_lapic)
+ if (!acpi_lapic && !smp_found_config)
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index d86dbf7..d7b6f7f 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -274,6 +274,18 @@
void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
+static void __init smp_register_lapic_address(unsigned long address)
+{
+ mp_lapic_addr = address;
+
+ set_fixmap_nocache(FIX_APIC_BASE, address);
+ if (boot_cpu_physical_apicid == -1U) {
+ boot_cpu_physical_apicid = read_apic_id();
+ apic_version[boot_cpu_physical_apicid] =
+ GET_APIC_VERSION(apic_read(APIC_LVR));
+ }
+}
+
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
char str[16];
@@ -295,6 +307,10 @@
if (early)
return 1;
+ /* Initialize the lapic mapping */
+ if (!acpi_lapic)
+ smp_register_lapic_address(mpc->lapic);
+
if (mpc->oemptr)
x86_init.mpparse.smp_read_mpc_oem(mpc);
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 7fea555..59efb53 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -8,6 +8,7 @@
*/
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -22,19 +23,37 @@
#include <asm/irq_vectors.h>
#include <asm/timer.h>
-struct msg_desc {
- struct bau_payload_queue_entry *msg;
- int msg_slot;
- int sw_ack_slot;
- struct bau_payload_queue_entry *va_queue_first;
- struct bau_payload_queue_entry *va_queue_last;
+/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
+static int timeout_base_ns[] = {
+ 20,
+ 160,
+ 1280,
+ 10240,
+ 81920,
+ 655360,
+ 5242880,
+ 167772160
};
-
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
-
-static int uv_bau_max_concurrent __read_mostly;
-
+static int timeout_us;
static int nobau;
+static int baudisabled;
+static spinlock_t disable_lock;
+static cycles_t congested_cycles;
+
+/* tunables: */
+static int max_bau_concurrent = MAX_BAU_CONCURRENT;
+static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
+static int plugged_delay = PLUGGED_DELAY;
+static int plugsb4reset = PLUGSB4RESET;
+static int timeoutsb4reset = TIMEOUTSB4RESET;
+static int ipi_reset_limit = IPI_RESET_LIMIT;
+static int complete_threshold = COMPLETE_THRESHOLD;
+static int congested_response_us = CONGESTED_RESPONSE_US;
+static int congested_reps = CONGESTED_REPS;
+static int congested_period = CONGESTED_PERIOD;
+static struct dentry *tunables_dir;
+static struct dentry *tunables_file;
+
static int __init setup_nobau(char *arg)
{
nobau = 1;
@@ -52,10 +71,6 @@
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
-struct reset_args {
- int sender;
-};
-
/*
* Determine the first node on a uvhub. 'Nodes' are used for kernel
* memory allocation.
@@ -126,7 +141,7 @@
struct ptc_stats *stat;
msg = mdp->msg;
- stat = &per_cpu(ptcstats, bcp->cpu);
+ stat = bcp->statp;
stat->d_retries++;
/*
* cancel any message from msg+1 to the retry itself
@@ -146,15 +161,14 @@
slot2 = msg2 - mdp->va_queue_first;
mmr = uv_read_local_mmr
(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
- msg_res = ((msg2->sw_ack_vector << 8) |
- msg2->sw_ack_vector);
+ msg_res = msg2->sw_ack_vector;
/*
* This is a message retry; clear the resources held
* by the previous message only if they timed out.
* If it has not timed out we have an unexpected
* situation to report.
*/
- if (mmr & (msg_res << 8)) {
+ if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
/*
* is the resource timed out?
* make everyone ignore the cancelled message.
@@ -164,9 +178,9 @@
cancel_count++;
uv_write_local_mmr(
UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
- (msg_res << 8) | msg_res);
- } else
- printk(KERN_INFO "note bau retry: no effect\n");
+ (msg_res << UV_SW_ACK_NPENDING) |
+ msg_res);
+ }
}
}
if (!cancel_count)
@@ -190,7 +204,7 @@
* This must be a normal message, or retry of a normal message
*/
msg = mdp->msg;
- stat = &per_cpu(ptcstats, bcp->cpu);
+ stat = bcp->statp;
if (msg->address == TLB_FLUSH_ALL) {
local_flush_tlb();
stat->d_alltlb++;
@@ -274,7 +288,7 @@
bcp = &per_cpu(bau_control, smp_processor_id());
rap = (struct reset_args *)ptr;
- stat = &per_cpu(ptcstats, bcp->cpu);
+ stat = bcp->statp;
stat->d_resets++;
/*
@@ -302,13 +316,13 @@
*/
mmr = uv_read_local_mmr
(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
- msg_res = ((msg->sw_ack_vector << 8) |
- msg->sw_ack_vector);
+ msg_res = msg->sw_ack_vector;
if (mmr & msg_res) {
stat->d_rcanceled++;
uv_write_local_mmr(
UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
- msg_res);
+ (msg_res << UV_SW_ACK_NPENDING) |
+ msg_res);
}
}
}
@@ -386,17 +400,12 @@
unsigned long mmr_offset, int right_shift, int this_cpu,
struct bau_control *bcp, struct bau_control *smaster, long try)
{
- int relaxes = 0;
unsigned long descriptor_status;
- unsigned long mmr;
- unsigned long mask;
cycles_t ttime;
- cycles_t timeout_time;
- struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
+ struct ptc_stats *stat = bcp->statp;
struct bau_control *hmaster;
hmaster = bcp->uvhub_master;
- timeout_time = get_cycles() + bcp->timeout_interval;
/* spin on the status MMR, waiting for it to go idle */
while ((descriptor_status = (((unsigned long)
@@ -423,7 +432,8 @@
* pending. In that case hardware returns the
* ERROR that looks like a destination timeout.
*/
- if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
+ if (cycles_2_us(ttime - bcp->send_message) <
+ timeout_us) {
bcp->conseccompletes = 0;
return FLUSH_RETRY_PLUGGED;
}
@@ -435,26 +445,6 @@
* descriptor_status is still BUSY
*/
cpu_relax();
- relaxes++;
- if (relaxes >= 10000) {
- relaxes = 0;
- if (get_cycles() > timeout_time) {
- quiesce_local_uvhub(hmaster);
-
- /* single-thread the register change */
- spin_lock(&hmaster->masks_lock);
- mmr = uv_read_local_mmr(mmr_offset);
- mask = 0UL;
- mask |= (3UL < right_shift);
- mask = ~mask;
- mmr &= mask;
- uv_write_local_mmr(mmr_offset, mmr);
- spin_unlock(&hmaster->masks_lock);
- end_uvhub_quiesce(hmaster);
- stat->s_busy++;
- return FLUSH_GIVEUP;
- }
- }
}
}
bcp->conseccompletes++;
@@ -494,56 +484,116 @@
return 1;
}
+/*
+ * Our retries are blocked by all destination swack resources being
+ * in use, and a timeout is pending. In that case hardware immediately
+ * returns the ERROR that looks like a destination timeout.
+ */
+static void
+destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
+ struct bau_control *hmaster, struct ptc_stats *stat)
+{
+ udelay(bcp->plugged_delay);
+ bcp->plugged_tries++;
+ if (bcp->plugged_tries >= bcp->plugsb4reset) {
+ bcp->plugged_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_plug++;
+ }
+}
+
+static void
+destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
+ struct bau_control *hmaster, struct ptc_stats *stat)
+{
+ hmaster->max_bau_concurrent = 1;
+ bcp->timeout_tries++;
+ if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
+ bcp->timeout_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_timeout++;
+ }
+}
+
+/*
+ * Completions are taking a very long time due to a congested numalink
+ * network.
+ */
+static void
+disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
+{
+ int tcpu;
+ struct bau_control *tbcp;
+
+ /* let only one cpu do this disabling */
+ spin_lock(&disable_lock);
+ if (!baudisabled && bcp->period_requests &&
+ ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
+ /* it becomes this cpu's job to turn on the use of the
+ BAU again */
+ baudisabled = 1;
+ bcp->set_bau_off = 1;
+ bcp->set_bau_on_time = get_cycles() +
+ sec_2_cycles(bcp->congested_period);
+ stat->s_bau_disabled++;
+ for_each_present_cpu(tcpu) {
+ tbcp = &per_cpu(bau_control, tcpu);
+ tbcp->baudisabled = 1;
+ }
+ }
+ spin_unlock(&disable_lock);
+}
+
/**
* uv_flush_send_and_wait
*
* Send a broadcast and wait for it to complete.
*
- * The flush_mask contains the cpus the broadcast is to be sent to, plus
+ * The flush_mask contains the cpus the broadcast is to be sent to including
* cpus that are on the local uvhub.
*
- * Returns NULL if all flushing represented in the mask was done. The mask
- * is zeroed.
- * Returns @flush_mask if some remote flushing remains to be done. The
- * mask will have some bits still set, representing any cpus on the local
- * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
+ * Returns 0 if all flushing represented in the mask was done.
+ * Returns 1 if it gives up entirely and the original cpu mask is to be
+ * returned to the kernel.
*/
-const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
- struct cpumask *flush_mask,
- struct bau_control *bcp)
+int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ struct cpumask *flush_mask, struct bau_control *bcp)
{
int right_shift;
- int uvhub;
- int bit;
int completion_status = 0;
int seq_number = 0;
long try = 0;
int cpu = bcp->uvhub_cpu;
int this_cpu = bcp->cpu;
- int this_uvhub = bcp->uvhub;
unsigned long mmr_offset;
unsigned long index;
cycles_t time1;
cycles_t time2;
- struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
+ cycles_t elapsed;
+ struct ptc_stats *stat = bcp->statp;
struct bau_control *smaster = bcp->socket_master;
struct bau_control *hmaster = bcp->uvhub_master;
- /*
- * Spin here while there are hmaster->max_concurrent or more active
- * descriptors. This is the per-uvhub 'throttle'.
- */
if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
&hmaster->active_descriptor_count,
- hmaster->max_concurrent)) {
+ hmaster->max_bau_concurrent)) {
stat->s_throttles++;
do {
cpu_relax();
} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
&hmaster->active_descriptor_count,
- hmaster->max_concurrent));
+ hmaster->max_bau_concurrent));
}
-
while (hmaster->uvhub_quiesce)
cpu_relax();
@@ -557,23 +607,10 @@
}
time1 = get_cycles();
do {
- /*
- * Every message from any given cpu gets a unique message
- * sequence number. But retries use that same number.
- * Our message may have timed out at the destination because
- * all sw-ack resources are in use and there is a timeout
- * pending there. In that case, our last send never got
- * placed into the queue and we need to persist until it
- * does.
- *
- * Make any retry a type MSG_RETRY so that the destination will
- * free any resource held by a previous message from this cpu.
- */
if (try == 0) {
- /* use message type set by the caller the first time */
+ bau_desc->header.msg_type = MSG_REGULAR;
seq_number = bcp->message_number++;
} else {
- /* use RETRY type on all the rest; same sequence */
bau_desc->header.msg_type = MSG_RETRY;
stat->s_retry_messages++;
}
@@ -581,50 +618,17 @@
index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
bcp->uvhub_cpu;
bcp->send_message = get_cycles();
-
uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
-
try++;
completion_status = uv_wait_completion(bau_desc, mmr_offset,
right_shift, this_cpu, bcp, smaster, try);
if (completion_status == FLUSH_RETRY_PLUGGED) {
- /*
- * Our retries may be blocked by all destination swack
- * resources being consumed, and a timeout pending. In
- * that case hardware immediately returns the ERROR
- * that looks like a destination timeout.
- */
- udelay(TIMEOUT_DELAY);
- bcp->plugged_tries++;
- if (bcp->plugged_tries >= PLUGSB4RESET) {
- bcp->plugged_tries = 0;
- quiesce_local_uvhub(hmaster);
- spin_lock(&hmaster->queue_lock);
- uv_reset_with_ipi(&bau_desc->distribution,
- this_cpu);
- spin_unlock(&hmaster->queue_lock);
- end_uvhub_quiesce(hmaster);
- bcp->ipi_attempts++;
- stat->s_resets_plug++;
- }
+ destination_plugged(bau_desc, bcp, hmaster, stat);
} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
- hmaster->max_concurrent = 1;
- bcp->timeout_tries++;
- udelay(TIMEOUT_DELAY);
- if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
- bcp->timeout_tries = 0;
- quiesce_local_uvhub(hmaster);
- spin_lock(&hmaster->queue_lock);
- uv_reset_with_ipi(&bau_desc->distribution,
- this_cpu);
- spin_unlock(&hmaster->queue_lock);
- end_uvhub_quiesce(hmaster);
- bcp->ipi_attempts++;
- stat->s_resets_timeout++;
- }
+ destination_timeout(bau_desc, bcp, hmaster, stat);
}
- if (bcp->ipi_attempts >= 3) {
+ if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
bcp->ipi_attempts = 0;
completion_status = FLUSH_GIVEUP;
break;
@@ -633,49 +637,36 @@
} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
(completion_status == FLUSH_RETRY_TIMEOUT));
time2 = get_cycles();
-
- if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
- && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
- hmaster->max_concurrent++;
-
- /*
- * hold any cpu not timing out here; no other cpu currently held by
- * the 'throttle' should enter the activation code
- */
+ bcp->plugged_tries = 0;
+ bcp->timeout_tries = 0;
+ if ((completion_status == FLUSH_COMPLETE) &&
+ (bcp->conseccompletes > bcp->complete_threshold) &&
+ (hmaster->max_bau_concurrent <
+ hmaster->max_bau_concurrent_constant))
+ hmaster->max_bau_concurrent++;
while (hmaster->uvhub_quiesce)
cpu_relax();
atomic_dec(&hmaster->active_descriptor_count);
-
- /* guard against cycles wrap */
- if (time2 > time1)
- stat->s_time += (time2 - time1);
- else
- stat->s_requestor--; /* don't count this one */
+ if (time2 > time1) {
+ elapsed = time2 - time1;
+ stat->s_time += elapsed;
+ if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
+ bcp->period_requests++;
+ bcp->period_time += elapsed;
+ if ((elapsed > congested_cycles) &&
+ (bcp->period_requests > bcp->congested_reps)) {
+ disable_for_congestion(bcp, stat);
+ }
+ }
+ } else
+ stat->s_requestor--;
if (completion_status == FLUSH_COMPLETE && try > 1)
stat->s_retriesok++;
else if (completion_status == FLUSH_GIVEUP) {
- /*
- * Cause the caller to do an IPI-style TLB shootdown on
- * the target cpu's, all of which are still in the mask.
- */
stat->s_giveup++;
- return flush_mask;
+ return 1;
}
-
- /*
- * Success, so clear the remote cpu's from the mask so we don't
- * use the IPI method of shootdown on them.
- */
- for_each_cpu(bit, flush_mask) {
- uvhub = uv_cpu_to_blade_id(bit);
- if (uvhub == this_uvhub)
- continue;
- cpumask_clear_cpu(bit, flush_mask);
- }
- if (!cpumask_empty(flush_mask))
- return flush_mask;
-
- return NULL;
+ return 0;
}
/**
@@ -707,70 +698,89 @@
struct mm_struct *mm,
unsigned long va, unsigned int cpu)
{
- int remotes;
int tcpu;
int uvhub;
int locals = 0;
+ int remotes = 0;
+ int hubs = 0;
struct bau_desc *bau_desc;
struct cpumask *flush_mask;
struct ptc_stats *stat;
struct bau_control *bcp;
+ struct bau_control *tbcp;
+ /* kernel was booted 'nobau' */
if (nobau)
return cpumask;
bcp = &per_cpu(bau_control, cpu);
+ stat = bcp->statp;
+
+ /* bau was disabled due to slow response */
+ if (bcp->baudisabled) {
+ /* the cpu that disabled it must re-enable it */
+ if (bcp->set_bau_off) {
+ if (get_cycles() >= bcp->set_bau_on_time) {
+ stat->s_bau_reenabled++;
+ baudisabled = 0;
+ for_each_present_cpu(tcpu) {
+ tbcp = &per_cpu(bau_control, tcpu);
+ tbcp->baudisabled = 0;
+ tbcp->period_requests = 0;
+ tbcp->period_time = 0;
+ }
+ }
+ }
+ return cpumask;
+ }
+
/*
* Each sending cpu has a per-cpu mask which it fills from the caller's
- * cpu mask. Only remote cpus are converted to uvhubs and copied.
+ * cpu mask. All cpus are converted to uvhubs and copied to the
+ * activation descriptor.
*/
flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
- /*
- * copy cpumask to flush_mask, removing current cpu
- * (current cpu should already have been flushed by the caller and
- * should never be returned if we return flush_mask)
- */
+ /* don't actually do a shootdown of the local cpu */
cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
if (cpu_isset(cpu, *cpumask))
- locals++; /* current cpu was targeted */
+ stat->s_ntargself++;
bau_desc = bcp->descriptor_base;
bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
-
bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
- remotes = 0;
+
+ /* cpu statistics */
for_each_cpu(tcpu, flush_mask) {
uvhub = uv_cpu_to_blade_id(tcpu);
- if (uvhub == bcp->uvhub) {
- locals++;
- continue;
- }
bau_uvhub_set(uvhub, &bau_desc->distribution);
- remotes++;
- }
- if (remotes == 0) {
- /*
- * No off_hub flushing; return status for local hub.
- * Return the caller's mask if all were local (the current
- * cpu may be in that mask).
- */
- if (locals)
- return cpumask;
+ if (uvhub == bcp->uvhub)
+ locals++;
else
- return NULL;
+ remotes++;
}
- stat = &per_cpu(ptcstats, cpu);
+ if ((locals + remotes) == 0)
+ return NULL;
stat->s_requestor++;
- stat->s_ntargcpu += remotes;
+ stat->s_ntargcpu += remotes + locals;
+ stat->s_ntargremotes += remotes;
+ stat->s_ntarglocals += locals;
remotes = bau_uvhub_weight(&bau_desc->distribution);
- stat->s_ntarguvhub += remotes;
- if (remotes >= 16)
+
+ /* uvhub statistics */
+ hubs = bau_uvhub_weight(&bau_desc->distribution);
+ if (locals) {
+ stat->s_ntarglocaluvhub++;
+ stat->s_ntargremoteuvhub += (hubs - 1);
+ } else
+ stat->s_ntargremoteuvhub += hubs;
+ stat->s_ntarguvhub += hubs;
+ if (hubs >= 16)
stat->s_ntarguvhub16++;
- else if (remotes >= 8)
+ else if (hubs >= 8)
stat->s_ntarguvhub8++;
- else if (remotes >= 4)
+ else if (hubs >= 4)
stat->s_ntarguvhub4++;
- else if (remotes >= 2)
+ else if (hubs >= 2)
stat->s_ntarguvhub2++;
else
stat->s_ntarguvhub1++;
@@ -779,10 +789,13 @@
bau_desc->payload.sending_cpu = cpu;
/*
- * uv_flush_send_and_wait returns null if all cpu's were messaged, or
- * the adjusted flush_mask if any cpu's were not messaged.
+ * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
+ * or 1 if it gave up and the original cpumask should be returned.
*/
- return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
+ if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+ return NULL;
+ else
+ return cpumask;
}
/*
@@ -810,7 +823,7 @@
time_start = get_cycles();
bcp = &per_cpu(bau_control, smp_processor_id());
- stat = &per_cpu(ptcstats, smp_processor_id());
+ stat = bcp->statp;
msgdesc.va_queue_first = bcp->va_queue_first;
msgdesc.va_queue_last = bcp->va_queue_last;
msg = bcp->bau_msg_head;
@@ -908,12 +921,12 @@
}
static inline unsigned long long
-millisec_2_cycles(unsigned long millisec)
+microsec_2_cycles(unsigned long microsec)
{
unsigned long ns;
unsigned long long cyc;
- ns = millisec * 1000;
+ ns = microsec * 1000;
cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
return cyc;
}
@@ -931,15 +944,19 @@
if (!cpu) {
seq_printf(file,
- "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
+ "# cpu sent stime self locals remotes ncpus localhub ");
seq_printf(file,
- "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
+ "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
+ seq_printf(file,
+ "numuvhubs4 numuvhubs2 numuvhubs1 dto ");
seq_printf(file,
"retries rok resetp resett giveup sto bz throt ");
seq_printf(file,
"sw_ack recv rtime all ");
seq_printf(file,
- "one mult none retry canc nocan reset rcan\n");
+ "one mult none retry canc nocan reset rcan ");
+ seq_printf(file,
+ "disable enable\n");
}
if (cpu < num_possible_cpus() && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
@@ -947,18 +964,23 @@
seq_printf(file,
"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
cpu, stat->s_requestor, cycles_2_us(stat->s_time),
- stat->s_ntarguvhub, stat->s_ntarguvhub16,
+ stat->s_ntargself, stat->s_ntarglocals,
+ stat->s_ntargremotes, stat->s_ntargcpu,
+ stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
+ stat->s_ntarguvhub, stat->s_ntarguvhub16);
+ seq_printf(file, "%ld %ld %ld %ld %ld ",
stat->s_ntarguvhub8, stat->s_ntarguvhub4,
stat->s_ntarguvhub2, stat->s_ntarguvhub1,
- stat->s_ntargcpu, stat->s_dtimeout);
+ stat->s_dtimeout);
seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
stat->s_retry_messages, stat->s_retriesok,
stat->s_resets_plug, stat->s_resets_timeout,
stat->s_giveup, stat->s_stimeout,
stat->s_busy, stat->s_throttles);
+
/* destination side statistics */
seq_printf(file,
- "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
+ "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
stat->d_requestee, cycles_2_us(stat->d_time),
@@ -966,15 +988,36 @@
stat->d_nomsg, stat->d_retries, stat->d_canceled,
stat->d_nocanceled, stat->d_resets,
stat->d_rcanceled);
+ seq_printf(file, "%ld %ld\n",
+ stat->s_bau_disabled, stat->s_bau_reenabled);
}
return 0;
}
/*
+ * Display the tunables through debugfs
+ */
+static ssize_t tunables_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[300];
+ int ret;
+
+ ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
+ "max_bau_concurrent plugged_delay plugsb4reset",
+ "timeoutsb4reset ipi_reset_limit complete_threshold",
+ "congested_response_us congested_reps congested_period",
+ max_bau_concurrent, plugged_delay, plugsb4reset,
+ timeoutsb4reset, ipi_reset_limit, complete_threshold,
+ congested_response_us, congested_reps, congested_period);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+}
+
+/*
 * -1: reset the statistics
* 0: display meaning of the statistics
- * >0: maximum concurrent active descriptors per uvhub (throttle)
*/
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
size_t count, loff_t *data)
@@ -983,7 +1026,6 @@
long input_arg;
char optstr[64];
struct ptc_stats *stat;
- struct bau_control *bcp;
if (count == 0 || count > sizeof(optstr))
return -EINVAL;
@@ -1059,32 +1101,161 @@
"reset: number of ipi-style reset requests processed\n");
printk(KERN_DEBUG
"rcan: number messages canceled by reset requests\n");
+ printk(KERN_DEBUG
+ "disable: number times use of the BAU was disabled\n");
+ printk(KERN_DEBUG
+ "enable: number times use of the BAU was re-enabled\n");
} else if (input_arg == -1) {
for_each_present_cpu(cpu) {
stat = &per_cpu(ptcstats, cpu);
memset(stat, 0, sizeof(struct ptc_stats));
}
- } else {
- uv_bau_max_concurrent = input_arg;
- bcp = &per_cpu(bau_control, smp_processor_id());
- if (uv_bau_max_concurrent < 1 ||
- uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
- printk(KERN_DEBUG
- "Error: BAU max concurrent %d; %d is invalid\n",
- bcp->max_concurrent, uv_bau_max_concurrent);
- return -EINVAL;
- }
- printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
- uv_bau_max_concurrent);
- for_each_present_cpu(cpu) {
- bcp = &per_cpu(bau_control, cpu);
- bcp->max_concurrent = uv_bau_max_concurrent;
- }
}
return count;
}
+static int local_atoi(const char *name)
+{
+ int val = 0;
+
+ for (;; name++) {
+ switch (*name) {
+ case '0' ... '9':
+ val = 10*val+(*name-'0');
+ break;
+ default:
+ return val;
+ }
+ }
+}
+
+/*
+ * set the tunables
+ * 0 values reset them to defaults
+ */
+static ssize_t tunables_write(struct file *file, const char __user *user,
+ size_t count, loff_t *data)
+{
+ int cpu;
+ int cnt = 0;
+ int val;
+ char *p;
+ char *q;
+ char instr[64];
+ struct bau_control *bcp;
+
+ if (count == 0 || count > sizeof(instr)-1)
+ return -EINVAL;
+ if (copy_from_user(instr, user, count))
+ return -EFAULT;
+
+ instr[count] = '\0';
+ /* count the fields */
+ p = instr + strspn(instr, WHITESPACE);
+ q = p;
+ for (; *p; p = q + strspn(q, WHITESPACE)) {
+ q = p + strcspn(p, WHITESPACE);
+ cnt++;
+ if (q == p)
+ break;
+ }
+ if (cnt != 9) {
+ printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
+ return -EINVAL;
+ }
+
+ p = instr + strspn(instr, WHITESPACE);
+ q = p;
+ for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
+ q = p + strcspn(p, WHITESPACE);
+ val = local_atoi(p);
+ switch (cnt) {
+ case 0:
+ if (val == 0) {
+ max_bau_concurrent = MAX_BAU_CONCURRENT;
+ max_bau_concurrent_constant =
+ MAX_BAU_CONCURRENT;
+ continue;
+ }
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ if (val < 1 || val > bcp->cpus_in_uvhub) {
+ printk(KERN_DEBUG
+ "Error: BAU max concurrent %d is invalid\n",
+ val);
+ return -EINVAL;
+ }
+ max_bau_concurrent = val;
+ max_bau_concurrent_constant = val;
+ continue;
+ case 1:
+ if (val == 0)
+ plugged_delay = PLUGGED_DELAY;
+ else
+ plugged_delay = val;
+ continue;
+ case 2:
+ if (val == 0)
+ plugsb4reset = PLUGSB4RESET;
+ else
+ plugsb4reset = val;
+ continue;
+ case 3:
+ if (val == 0)
+ timeoutsb4reset = TIMEOUTSB4RESET;
+ else
+ timeoutsb4reset = val;
+ continue;
+ case 4:
+ if (val == 0)
+ ipi_reset_limit = IPI_RESET_LIMIT;
+ else
+ ipi_reset_limit = val;
+ continue;
+ case 5:
+ if (val == 0)
+ complete_threshold = COMPLETE_THRESHOLD;
+ else
+ complete_threshold = val;
+ continue;
+ case 6:
+ if (val == 0)
+ congested_response_us = CONGESTED_RESPONSE_US;
+ else
+ congested_response_us = val;
+ continue;
+ case 7:
+ if (val == 0)
+ congested_reps = CONGESTED_REPS;
+ else
+ congested_reps = val;
+ continue;
+ case 8:
+ if (val == 0)
+ congested_period = CONGESTED_PERIOD;
+ else
+ congested_period = val;
+ continue;
+ }
+ if (q == p)
+ break;
+ }
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->max_bau_concurrent = max_bau_concurrent;
+ bcp->max_bau_concurrent_constant = max_bau_concurrent;
+ bcp->plugged_delay = plugged_delay;
+ bcp->plugsb4reset = plugsb4reset;
+ bcp->timeoutsb4reset = timeoutsb4reset;
+ bcp->ipi_reset_limit = ipi_reset_limit;
+ bcp->complete_threshold = complete_threshold;
+ bcp->congested_response_us = congested_response_us;
+ bcp->congested_reps = congested_reps;
+ bcp->congested_period = congested_period;
+ }
+ return count;
+}
+
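
For reference, the write side expects all nine tunables on one line, in the order tunables_read() prints them, and a zero in any field restores that field's compiled-in default. Assuming debugfs is mounted in the usual place, writing the defaults explicitly would look like:

    echo "3 10 100 1 1 5 1000 10 30" > /sys/kernel/debug/sgi_uv/bau_tunables

(the fields being max_bau_concurrent, plugged_delay, plugsb4reset, timeoutsb4reset, ipi_reset_limit, complete_threshold, congested_response_us, congested_reps and congested_period).
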
static const struct seq_operations uv_ptc_seq_ops = {
.start = uv_ptc_seq_start,
.next = uv_ptc_seq_next,
@@ -1097,6 +1268,11 @@
return seq_open(file, &uv_ptc_seq_ops);
}
+static int tunables_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
static const struct file_operations proc_uv_ptc_operations = {
.open = uv_ptc_proc_open,
.read = seq_read,
@@ -1105,6 +1281,12 @@
.release = seq_release,
};
+static const struct file_operations tunables_fops = {
+ .open = tunables_open,
+ .read = tunables_read,
+ .write = tunables_write,
+};
+
static int __init uv_ptc_init(void)
{
struct proc_dir_entry *proc_uv_ptc;
@@ -1119,6 +1301,20 @@
UV_PTC_BASENAME);
return -EINVAL;
}
+
+ tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
+ if (!tunables_dir) {
+ printk(KERN_ERR "unable to create debugfs directory %s\n",
+ UV_BAU_TUNABLES_DIR);
+ return -EINVAL;
+ }
+ tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
+ tunables_dir, NULL, &tunables_fops);
+ if (!tunables_file) {
+ printk(KERN_ERR "unable to create debugfs file %s\n",
+ UV_BAU_TUNABLES_FILE);
+ return -EINVAL;
+ }
return 0;
}
@@ -1259,15 +1455,44 @@
}
/*
+ * We will set BAU_MISC_CONTROL with a timeout period.
+ * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
+ * So the destination timeout period has to be calculated from them.
+ */
+static int
+calculate_destination_timeout(void)
+{
+ unsigned long mmr_image;
+ int mult1;
+ int mult2;
+ int index;
+ int base;
+ int ret;
+ unsigned long ts_ns;
+
+ mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
+ mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+ index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
+ mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
+ mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
+ base = timeout_base_ns[index];
+ ts_ns = base * mult1 * mult2;
+ ret = ts_ns / 1000;
+ return ret;
+}
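
A worked instance of this computation, with the two BIOS-set fields assumed for illustration (chosen so the result matches the 1003520-nanosecond figure the removed BIOS_TO comment cited):

    /*
     * mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK
     *       = 0x09 & 3 = 1
     * index = 3 (assumed)  ->  base = timeout_base_ns[3] = 10240
     * mult2 = 98 (assumed)
     * ts_ns = 10240 * 1 * 98 = 1003520 ns  ->  returns 1003 (us)
     */
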
+
+/*
* initialize the bau_control structure for each cpu
*/
static void uv_init_per_cpu(int nuvhubs)
{
- int i, j, k;
+ int i;
int cpu;
int pnode;
int uvhub;
short socket = 0;
+ unsigned short socket_mask;
+ unsigned int uvhub_mask = 0;
struct bau_control *bcp;
struct uvhub_desc *bdp;
struct socket_desc *sdp;
@@ -1278,7 +1503,7 @@
short cpu_number[16];
};
struct uvhub_desc {
- short num_sockets;
+ unsigned short socket_mask;
short num_cpus;
short uvhub;
short pnode;
@@ -1286,57 +1511,83 @@
};
struct uvhub_desc *uvhub_descs;
+ timeout_us = calculate_destination_timeout();
+
uvhub_descs = (struct uvhub_desc *)
kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
for_each_present_cpu(cpu) {
bcp = &per_cpu(bau_control, cpu);
memset(bcp, 0, sizeof(struct bau_control));
- spin_lock_init(&bcp->masks_lock);
- bcp->max_concurrent = uv_bau_max_concurrent;
pnode = uv_cpu_hub_info(cpu)->pnode;
uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
+ uvhub_mask |= (1 << uvhub);
bdp = &uvhub_descs[uvhub];
bdp->num_cpus++;
bdp->uvhub = uvhub;
bdp->pnode = pnode;
- /* time interval to catch a hardware stay-busy bug */
- bcp->timeout_interval = millisec_2_cycles(3);
- /* kludge: assume uv_hub.h is constant */
- socket = (cpu_physical_id(cpu)>>5)&1;
- if (socket >= bdp->num_sockets)
- bdp->num_sockets = socket+1;
+ /* kludge: assume one node per socket, and that disabling
+    a socket just leaves a gap in node numbers */
+ socket = (cpu_to_node(cpu) & 1);
+ bdp->socket_mask |= (1 << socket);
sdp = &bdp->socket[socket];
sdp->cpu_number[sdp->num_cpus] = cpu;
sdp->num_cpus++;
}
- socket = 0;
- for_each_possible_blade(uvhub) {
+ uvhub = 0;
+ while (uvhub_mask) {
+ if (!(uvhub_mask & 1))
+ goto nexthub;
bdp = &uvhub_descs[uvhub];
- for (i = 0; i < bdp->num_sockets; i++) {
- sdp = &bdp->socket[i];
- for (j = 0; j < sdp->num_cpus; j++) {
- cpu = sdp->cpu_number[j];
+ socket_mask = bdp->socket_mask;
+ socket = 0;
+ while (socket_mask) {
+ if (!(socket_mask & 1))
+ goto nextsocket;
+ sdp = &bdp->socket[socket];
+ for (i = 0; i < sdp->num_cpus; i++) {
+ cpu = sdp->cpu_number[i];
bcp = &per_cpu(bau_control, cpu);
bcp->cpu = cpu;
- if (j == 0) {
+ if (i == 0) {
smaster = bcp;
- if (i == 0)
+ if (socket == 0)
hmaster = bcp;
}
bcp->cpus_in_uvhub = bdp->num_cpus;
bcp->cpus_in_socket = sdp->num_cpus;
bcp->socket_master = smaster;
+ bcp->uvhub = bdp->uvhub;
bcp->uvhub_master = hmaster;
- for (k = 0; k < DEST_Q_SIZE; k++)
- bcp->socket_acknowledge_count[k] = 0;
- bcp->uvhub_cpu =
- uv_cpu_hub_info(cpu)->blade_processor_id;
+ bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
+ blade_processor_id;
}
+nextsocket:
socket++;
+ socket_mask = (socket_mask >> 1);
}
+nexthub:
+ uvhub++;
+ uvhub_mask = (uvhub_mask >> 1);
}
kfree(uvhub_descs);
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->baudisabled = 0;
+ bcp->statp = &per_cpu(ptcstats, cpu);
+ /* time interval to catch a hardware stay-busy bug */
+ bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
+ bcp->max_bau_concurrent = max_bau_concurrent;
+ bcp->max_bau_concurrent_constant = max_bau_concurrent;
+ bcp->plugged_delay = plugged_delay;
+ bcp->plugsb4reset = plugsb4reset;
+ bcp->timeoutsb4reset = timeoutsb4reset;
+ bcp->ipi_reset_limit = ipi_reset_limit;
+ bcp->complete_threshold = complete_threshold;
+ bcp->congested_response_us = congested_response_us;
+ bcp->congested_reps = congested_reps;
+ bcp->congested_period = congested_period;
+ }
}
/*
@@ -1361,10 +1612,11 @@
zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
GFP_KERNEL, cpu_to_node(cur_cpu));
- uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
uv_nshift = uv_hub_info->m_val;
uv_mmask = (1UL << uv_hub_info->m_val) - 1;
nuvhubs = uv_num_possible_blades();
+ spin_lock_init(&disable_lock);
+ congested_cycles = microsec_2_cycles(congested_response_us);
uv_init_per_cpu(nuvhubs);
@@ -1383,15 +1635,19 @@
alloc_intr_gate(vector, uv_bau_message_intr1);
for_each_possible_blade(uvhub) {
- pnode = uv_blade_to_pnode(uvhub);
- /* INIT the bau */
- uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
- ((unsigned long)1 << 63));
- mmr = 1; /* should be 1 to broadcast to both sockets */
- uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
+ if (uv_blade_nr_possible_cpus(uvhub)) {
+ pnode = uv_blade_to_pnode(uvhub);
+ /* INIT the bau */
+ uv_write_global_mmr64(pnode,
+ UVH_LB_BAU_SB_ACTIVATION_CONTROL,
+ ((unsigned long)1 << 63));
+ mmr = 1; /* should be 1 to broadcast to both sockets */
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
+ mmr);
+ }
}
return 0;
}
core_initcall(uv_bau_init);
-core_initcall(uv_ptc_init);
+fs_initcall(uv_ptc_init);
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 4a5979a..2cda60a 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -25,150 +25,172 @@
CFI_ADJUST_CFA_OFFSET -4
.endm
-.macro BEGIN func reg
-$v = \reg
+#define BEGIN(op) \
+.macro endp; \
+ CFI_ENDPROC; \
+ENDPROC(atomic64_##op##_386); \
+.purgem endp; \
+.endm; \
+ENTRY(atomic64_##op##_386); \
+ CFI_STARTPROC; \
+ LOCK v;
-ENTRY(atomic64_\func\()_386)
- CFI_STARTPROC
- LOCK $v
+#define ENDP endp
-.macro RETURN
- UNLOCK $v
+#define RET \
+ UNLOCK v; \
ret
-.endm
-.macro END_
- CFI_ENDPROC
-ENDPROC(atomic64_\func\()_386)
-.purgem RETURN
-.purgem END_
-.purgem END
-.endm
+#define RET_ENDP \
+ RET; \
+ ENDP
-.macro END
-RETURN
-END_
-.endm
-.endm
+#define v %ecx
+BEGIN(read)
+ movl (v), %eax
+ movl 4(v), %edx
+RET_ENDP
+#undef v
-BEGIN read %ecx
- movl ($v), %eax
- movl 4($v), %edx
-END
+#define v %esi
+BEGIN(set)
+ movl %ebx, (v)
+ movl %ecx, 4(v)
+RET_ENDP
+#undef v
-BEGIN set %esi
- movl %ebx, ($v)
- movl %ecx, 4($v)
-END
+#define v %esi
+BEGIN(xchg)
+ movl (v), %eax
+ movl 4(v), %edx
+ movl %ebx, (v)
+ movl %ecx, 4(v)
+RET_ENDP
+#undef v
-BEGIN xchg %esi
- movl ($v), %eax
- movl 4($v), %edx
- movl %ebx, ($v)
- movl %ecx, 4($v)
-END
+#define v %ecx
+BEGIN(add)
+ addl %eax, (v)
+ adcl %edx, 4(v)
+RET_ENDP
+#undef v
-BEGIN add %ecx
- addl %eax, ($v)
- adcl %edx, 4($v)
-END
+#define v %ecx
+BEGIN(add_return)
+ addl (v), %eax
+ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+RET_ENDP
+#undef v
-BEGIN add_return %ecx
- addl ($v), %eax
- adcl 4($v), %edx
- movl %eax, ($v)
- movl %edx, 4($v)
-END
+#define v %ecx
+BEGIN(sub)
+ subl %eax, (v)
+ sbbl %edx, 4(v)
+RET_ENDP
+#undef v
-BEGIN sub %ecx
- subl %eax, ($v)
- sbbl %edx, 4($v)
-END
-
-BEGIN sub_return %ecx
+#define v %ecx
+BEGIN(sub_return)
negl %edx
negl %eax
sbbl $0, %edx
- addl ($v), %eax
- adcl 4($v), %edx
- movl %eax, ($v)
- movl %edx, 4($v)
-END
+ addl (v), %eax
+ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+RET_ENDP
+#undef v
-BEGIN inc %esi
- addl $1, ($v)
- adcl $0, 4($v)
-END
+#define v %esi
+BEGIN(inc)
+ addl $1, (v)
+ adcl $0, 4(v)
+RET_ENDP
+#undef v
-BEGIN inc_return %esi
- movl ($v), %eax
- movl 4($v), %edx
+#define v %esi
+BEGIN(inc_return)
+ movl (v), %eax
+ movl 4(v), %edx
addl $1, %eax
adcl $0, %edx
- movl %eax, ($v)
- movl %edx, 4($v)
-END
+ movl %eax, (v)
+ movl %edx, 4(v)
+RET_ENDP
+#undef v
-BEGIN dec %esi
- subl $1, ($v)
- sbbl $0, 4($v)
-END
+#define v %esi
+BEGIN(dec)
+ subl $1, (v)
+ sbbl $0, 4(v)
+RET_ENDP
+#undef v
-BEGIN dec_return %esi
- movl ($v), %eax
- movl 4($v), %edx
+#define v %esi
+BEGIN(dec_return)
+ movl (v), %eax
+ movl 4(v), %edx
subl $1, %eax
sbbl $0, %edx
- movl %eax, ($v)
- movl %edx, 4($v)
-END
+ movl %eax, (v)
+ movl %edx, 4(v)
+RET_ENDP
+#undef v
-BEGIN add_unless %ecx
+#define v %ecx
+BEGIN(add_unless)
addl %eax, %esi
adcl %edx, %edi
- addl ($v), %eax
- adcl 4($v), %edx
+ addl (v), %eax
+ adcl 4(v), %edx
cmpl %eax, %esi
je 3f
1:
- movl %eax, ($v)
- movl %edx, 4($v)
+ movl %eax, (v)
+ movl %edx, 4(v)
movl $1, %eax
2:
-RETURN
+ RET
3:
cmpl %edx, %edi
jne 1b
xorl %eax, %eax
jmp 2b
-END_
+ENDP
+#undef v
-BEGIN inc_not_zero %esi
- movl ($v), %eax
- movl 4($v), %edx
+#define v %esi
+BEGIN(inc_not_zero)
+ movl (v), %eax
+ movl 4(v), %edx
testl %eax, %eax
je 3f
1:
addl $1, %eax
adcl $0, %edx
- movl %eax, ($v)
- movl %edx, 4($v)
+ movl %eax, (v)
+ movl %edx, 4(v)
movl $1, %eax
2:
-RETURN
+ RET
3:
testl %edx, %edx
jne 1b
jmp 2b
-END_
+ENDP
+#undef v
-BEGIN dec_if_positive %esi
- movl ($v), %eax
- movl 4($v), %edx
+#define v %esi
+BEGIN(dec_if_positive)
+ movl (v), %eax
+ movl 4(v), %edx
subl $1, %eax
sbbl $0, %edx
js 1f
- movl %eax, ($v)
- movl %edx, 4($v)
+ movl %eax, (v)
+ movl %edx, 4(v)
1:
-END
+RET_ENDP
+#undef v
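
To make the new cpp-based scheme concrete, this is approximately what BEGIN(read) through RET_ENDP expand to once the preprocessor and the temporary endp macro have both run (a sketch; the LOCK/UNLOCK irq-save macros are left unexpanded):

    ENTRY(atomic64_read_386)
            CFI_STARTPROC
            LOCK %ecx               /* 'v' was #defined to %ecx */
            movl (%ecx), %eax
            movl 4(%ecx), %edx
            UNLOCK %ecx
            ret
            CFI_ENDPROC
    ENDPROC(atomic64_read_386)
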
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f627779..4c4508e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -802,8 +802,10 @@
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER))
+ if (!(error_code & PF_USER)) {
no_context(regs, error_code, address);
+ return;
+ }
/* User-space => ok to do another page fault: */
if (is_prefetch(regs, error_code, address))
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 7715d32..d3530f6 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -1273,6 +1273,7 @@
if (retval != 0) {
printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
nr, param.port, param.irq, param.membase);
+ pci_disable_device(dev);
return -ENODEV;
}
return 0;
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index 5a3f830..a79eb5a 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -210,6 +210,7 @@
if (retval != 0) {
printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
param.port, param.irq, param.membase);
+ pci_disable_device(dev);
return -ENODEV;
}
return 0;
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index d2dd61d..af25e1f 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -1094,6 +1094,7 @@
pr_info("mISDN: do not have informations about adapter at %s\n",
pci_name(pdev));
kfree(card);
+ pci_disable_device(pdev);
return -EINVAL;
} else
pr_notice("mISDN: found adapter %s at %s\n",
@@ -1103,7 +1104,7 @@
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err) {
- pci_disable_device(card->pdev);
+ pci_disable_device(pdev);
kfree(card);
pci_set_drvdata(pdev, NULL);
} else if (ent->driver_data == INF_SCT_1) {
@@ -1114,6 +1115,7 @@
sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL);
if (!sc) {
release_card(card);
+ pci_disable_device(pdev);
return -ENOMEM;
}
sc->irq = card->irq;
@@ -1121,6 +1123,7 @@
sc->ci = card->ci + i;
err = setup_instance(sc);
if (err) {
+ pci_disable_device(pdev);
kfree(sc);
release_card(card);
break;
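
The hunks above all enforce the same probe-path rule: once pci_enable_device() has succeeded, every subsequent error exit must call pci_disable_device(). A minimal sketch of the pattern (probe and helper names hypothetical):

    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
    {
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;

            err = example_setup(pdev);      /* hypothetical helper */
            if (err)
                    goto err_disable;

            return 0;

    err_disable:
            pci_disable_device(pdev);
            return err;
    }
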
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 077ccf8..2111dbf 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -22,13 +22,13 @@
#include <net/caif/caif_spi.h>
#ifndef CONFIG_CAIF_SPI_SYNC
-#define SPI_DATA_POS SPI_CMD_SZ
+#define SPI_DATA_POS 0
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
return cfspi->rx_cpck_len;
}
#else
-#define SPI_DATA_POS 0
+#define SPI_DATA_POS SPI_CMD_SZ
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
return 0;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a527e37..eb799b3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,7 @@
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
depends on !S390
- depends on NET_ETHERNET
+ depends on NETDEVICES
help
Ethernet controllers are usually attached to PHY
devices. This option provides infrastructure for
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5130db8..1bb16cb7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -301,7 +301,7 @@
/**
* phy_mii_ioctl - generic PHY MII ioctl interface
* @phydev: the phy_device struct
- * @mii_data: MII ioctl data
+ * @ifr: &struct ifreq for socket ioctl's
* @cmd: ioctl cmd to execute
*
* Note that this function is currently incompatible with the
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index b9615bd..bf6d87a 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -473,48 +473,58 @@
static int
qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
- struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pci_info *pci_info;
int i, ret = 0, err;
u8 pfn;
- if (!adapter->npars)
- adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
- QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
- if (!adapter->npars)
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
return -ENOMEM;
- if (!adapter->eswitch)
- adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+ adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+ QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+ if (!adapter->npars) {
+ err = -ENOMEM;
+ goto err_pci_info;
+ }
+
+ adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
if (!adapter->eswitch) {
err = -ENOMEM;
- goto err_eswitch;
+ goto err_npars;
}
ret = qlcnic_get_pci_info(adapter, pci_info);
- if (!ret) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- pfn = pci_info[i].id;
- if (pfn > QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
- adapter->npars[pfn].active = pci_info[i].active;
- adapter->npars[pfn].type = pci_info[i].type;
- adapter->npars[pfn].phy_port = pci_info[i].default_port;
- adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
- adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
- adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
- }
+ if (ret)
+ goto err_eswitch;
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
- adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
-
- return ret;
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pfn = pci_info[i].id;
+ if (pfn > QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+ adapter->npars[pfn].active = pci_info[i].active;
+ adapter->npars[pfn].type = pci_info[i].type;
+ adapter->npars[pfn].phy_port = pci_info[i].default_port;
+ adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
+ adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
+ adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
}
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+
+ kfree(pci_info);
+ return 0;
+
+err_eswitch:
kfree(adapter->eswitch);
adapter->eswitch = NULL;
-err_eswitch:
+err_npars:
kfree(adapter->npars);
+ adapter->npars = NULL;
+err_pci_info:
+ kfree(pci_info);
return ret;
}
@@ -3361,15 +3371,21 @@
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
- struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pci_info *pci_info;
int i, ret;
if (size != sizeof(pci_cfg))
return QL_STATUS_INVALID_PARAM;
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
ret = qlcnic_get_pci_info(adapter, pci_info);
- if (ret)
+ if (ret) {
+ kfree(pci_info);
return ret;
+ }
for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
@@ -3380,8 +3396,8 @@
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
memcpy(buf, &pci_cfg, size);
+ kfree(pci_info);
return size;
-
}
static struct bin_attribute bin_attr_npar_config = {
.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
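
Both qlcnic hunks apply the same conversion: an array of QLCNIC_MAX_PCI_FUNC struct qlcnic_pci_info entries is too large to live on the kernel stack, so it moves to the heap with kcalloc() and must then be freed on every exit path. A condensed sketch of the shape (function name hypothetical, driver types as in the patch):

    static int example_fetch_pci_info(struct qlcnic_adapter *adapter)
    {
            struct qlcnic_pci_info *pci_info;
            int ret;

            pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info),
                               GFP_KERNEL);
            if (!pci_info)
                    return -ENOMEM;

            ret = qlcnic_get_pci_info(adapter, pci_info);

            kfree(pci_info);        /* freed on success and error alike */
            return ret;
    }
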
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7f62e2d..ca7fc9d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -315,7 +315,7 @@
static void rx_complete (struct urb *urb);
-static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
struct sk_buff *skb;
struct skb_data *entry;
@@ -327,7 +327,7 @@
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
usb_free_urb (urb);
- return;
+ return -ENOMEM;
}
skb_reserve (skb, NET_IP_ALIGN);
@@ -357,6 +357,9 @@
netif_dbg(dev, ifdown, dev->net, "device gone\n");
netif_device_detach (dev->net);
break;
+ case -EHOSTUNREACH:
+ retval = -ENOLINK;
+ break;
default:
netif_dbg(dev, rx_err, dev->net,
"rx submit, %d\n", retval);
@@ -374,6 +377,7 @@
dev_kfree_skb_any (skb);
usb_free_urb (urb);
}
+ return retval;
}
@@ -912,6 +916,7 @@
/* tasklet could resubmit itself forever if memory is tight */
if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
struct urb *urb = NULL;
+ int resched = 1;
if (netif_running (dev->net))
urb = usb_alloc_urb (0, GFP_KERNEL);
@@ -922,10 +927,12 @@
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto fail_lowmem;
- rx_submit (dev, urb, GFP_KERNEL);
+ if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
+ resched = 0;
usb_autopm_put_interface(dev->intf);
fail_lowmem:
- tasklet_schedule (&dev->bh);
+ if (resched)
+ tasklet_schedule (&dev->bh);
}
}
@@ -1175,8 +1182,11 @@
// don't refill the queue all at once
for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
urb = usb_alloc_urb (0, GFP_ATOMIC);
- if (urb != NULL)
- rx_submit (dev, urb, GFP_ATOMIC);
+ if (urb != NULL) {
+ if (rx_submit (dev, urb, GFP_ATOMIC) ==
+ -ENOLINK)
+ return;
+ }
}
if (temp != dev->rxq.qlen)
netif_dbg(dev, link, dev->net,
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ad7719f..e050bd6 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -885,20 +885,21 @@
* Receive a frame through the DMA
*/
static inline void
-fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
- unsigned char *mem, int len)
+fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
+ dma_addr_t mem, int len)
{
/*
* This routine will setup the DMA and start it
*/
- dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
+ dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
+ (unsigned long) skb, (unsigned long) mem, len);
if (card->dmarx_in_progress) {
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
}
- outl((unsigned long) skb, card->pci_conf + DMAPADR0); /* Copy to here */
- outl((unsigned long) mem, card->pci_conf + DMALADR0); /* from here */
+ outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */
+ outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
@@ -1309,8 +1310,8 @@
card->dma_port_rx = port;
card->dma_len_rx = len;
card->dma_rxpos = rxp;
- fst_rx_dma(card, (char *) card->rx_dma_handle_card,
- (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
+ fst_rx_dma(card, card->rx_dma_handle_card,
+ BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
}
if (rxp != port->rxpos) {
dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8848333..fec0262 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -260,7 +260,7 @@
.shadow_ram_support = false,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a07310f..6950a78 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -769,22 +769,6 @@
rts_retry_limit = data_retry_limit;
tx_cmd->rts_retry_limit = rts_retry_limit;
- if (ieee80211_is_mgmt(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- if (tx_flags & TX_CMD_FLG_RTS_MSK) {
- tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- tx_flags |= TX_CMD_FLG_CTS_MSK;
- }
- break;
- default:
- break;
- }
- }
-
tx_cmd->rate = rate;
tx_cmd->tx_flags = tx_flags;
@@ -2717,7 +2701,7 @@
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.get_hcmd_size = iwl3945_get_hcmd_size,
.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
- .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+ .tx_cmd_protection = iwlcore_tx_cmd_protection,
.request_scan = iwl3945_request_scan,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d6531ad..d6da356 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2223,7 +2223,7 @@
.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
.chain_noise_reset = iwl4965_chain_noise_reset,
.gain_computation = iwl4965_gain_computation,
- .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+ .tx_cmd_protection = iwlcore_tx_cmd_protection,
.calc_rssi = iwl4965_calc_rssi,
.request_scan = iwlagn_request_scan,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 8093ce2..aacf377 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -506,7 +506,7 @@
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
@@ -537,7 +537,7 @@
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
@@ -597,7 +597,7 @@
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
@@ -628,7 +628,7 @@
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
@@ -659,7 +659,7 @@
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 5827052..af4fd50 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -381,7 +381,7 @@
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
@@ -489,7 +489,7 @@
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
@@ -563,7 +563,7 @@
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
@@ -637,7 +637,7 @@
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
@@ -714,7 +714,7 @@
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
@@ -821,7 +821,7 @@
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
@@ -859,7 +859,7 @@
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
@@ -933,7 +933,7 @@
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
- .use_rts_for_ht = true, /* use rts/cts protection */
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index a7216dd..75b901b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -211,10 +211,21 @@
}
}
-static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags)
+static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
{
- *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
+ info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+ return;
+ }
+
+ if (priv->cfg->use_rts_for_aggregation &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
+ *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+ return;
+ }
}
/* Calc max signal level (dBm) among 3 possible receivers */
@@ -268,7 +279,7 @@
.build_addsta_hcmd = iwlagn_build_addsta_hcmd,
.gain_computation = iwlagn_gain_computation,
.chain_noise_reset = iwlagn_chain_noise_reset,
- .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
+ .tx_cmd_protection = iwlagn_tx_cmd_protection,
.calc_rssi = iwlagn_calc_rssi,
.request_scan = iwlagn_request_scan,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index d04502d..69155aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -379,10 +379,7 @@
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
}
- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-
- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
if (ieee80211_is_mgmt(fc)) {
@@ -456,21 +453,6 @@
if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
rate_flags |= RATE_MCS_CCK_MSK;
- /* Set up RTS and CTS flags for certain packets */
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
- tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
- }
- break;
- default:
- break;
- }
-
/* Set up antennas */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
priv->hw_params.valid_tx_ant);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 35337b1e..c1882fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -202,13 +202,6 @@
priv->start_calib = 0;
if (new_assoc) {
- /*
- * allow CTS-to-self if possible for new association.
- * this is relevant only for 5000 series and up,
- * but will not damage 4965
- */
- priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
-
/* Apply the new configuration
* RXON assoc doesn't clear the station table in uCode,
*/
@@ -1618,45 +1611,9 @@
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
-static ssize_t show_rts_ht_protection(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- return sprintf(buf, "%s\n",
- priv->cfg->use_rts_for_ht ? "RTS/CTS" : "CTS-to-self");
-}
-
-static ssize_t store_rts_ht_protection(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- IWL_INFO(priv, "Input is not in decimal form.\n");
- else {
- if (!iwl_is_associated(priv))
- priv->cfg->use_rts_for_ht = val ? true : false;
- else
- IWL_ERR(priv, "Sta associated with AP - "
- "Change protection mechanism is not allowed\n");
- ret = count;
- }
- return ret;
-}
-
-static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO,
- show_rts_ht_protection, store_rts_ht_protection);
-
-
static struct attribute *iwl_sysfs_entries[] = {
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
- &dev_attr_rts_ht_protection.attr,
#ifdef CONFIG_IWLWIFI_DEBUG
&dev_attr_debug_level.attr,
#endif
@@ -3464,25 +3421,6 @@
return ret;
}
-/*
- * switch to RTS/CTS for TX
- */
-static void iwl_enable_rts_cts(struct iwl_priv *priv)
-{
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_INFO(priv, "use RTS/CTS protection\n");
- iwlcore_commit_rxon(priv);
- } else {
- /* scanning, defer the request until scan completed */
- IWL_DEBUG_INFO(priv, "defer setting RTS/CTS protection\n");
- }
-}
-
static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -3529,14 +3467,33 @@
}
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
ret = 0;
+ if (priv->cfg->use_rts_for_aggregation) {
+ struct iwl_station_priv *sta_priv =
+ (void *) sta->drv_priv;
+ /*
+ * switch off RTS/CTS if it was previously enabled
+ */
+
+ sta_priv->lq_sta.lq.general_params.flags &=
+ ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+ iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
+ CMD_ASYNC, false);
+ }
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
- if (priv->cfg->use_rts_for_ht) {
+ if (priv->cfg->use_rts_for_aggregation) {
+ struct iwl_station_priv *sta_priv =
+ (void *) sta->drv_priv;
+
/*
* switch to RTS/CTS if it is the prefer protection
* method for HT traffic
*/
- iwl_enable_rts_cts(priv);
+
+ sta_priv->lq_sta.lq.general_params.flags |=
+ LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+ iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
+ CMD_ASYNC, false);
}
ret = 0;
break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 8ccb6d2..2c03c6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -401,21 +401,38 @@
EXPORT_SYMBOL(iwlcore_free_geos);
/*
- * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
+ * iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
* function.
*/
-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags)
+void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
{
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
*tx_flags |= TX_CMD_FLG_RTS_MSK;
*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ if (!ieee80211_is_mgmt(fc))
+ return;
+
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ break;
+ }
} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
*tx_flags |= TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
}
}
-EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
+EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
+
static bool is_single_rx_stream(struct iwl_priv *priv)
{
@@ -1869,6 +1886,10 @@
priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
}
if (changes & BSS_CHANGED_BASIC_RATES) {
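Taken together, the iwlwifi hunks replace the per-frame RTS flag hook with a single tx_cmd_protection() callback: protection is requested either because mac80211 rate control asked for RTS/CTS or CTS-to-self, or because the device config prefers RTS for aggregated (A-MPDU) traffic; the old AUTH/DEAUTH/ASSOC special-casing moves into the shared iwlcore variant. A standalone model of that decision, with illustrative flag values rather than the driver's real masks:

#include <stdbool.h>
#include <stdio.h>

#define RC_USE_RTS_CTS		0x1	/* models IEEE80211_TX_RC_USE_RTS_CTS */
#define RC_USE_CTS_PROTECT	0x2	/* models IEEE80211_TX_RC_USE_CTS_PROTECT */
#define CTL_AMPDU		0x4	/* models IEEE80211_TX_CTL_AMPDU */
#define FLG_PROT_REQUIRE	0x8	/* models TX_CMD_FLG_PROT_REQUIRE_MSK */

static unsigned tx_cmd_protection(unsigned rc_flags, unsigned ctl_flags,
				  bool use_rts_for_aggregation)
{
	if (rc_flags & (RC_USE_RTS_CTS | RC_USE_CTS_PROTECT))
		return FLG_PROT_REQUIRE;	/* rate control asked for it */

	if (use_rts_for_aggregation && (ctl_flags & CTL_AMPDU))
		return FLG_PROT_REQUIRE;	/* config prefers RTS for A-MPDU */

	return 0;
}

int main(void)
{
	printf("ampdu: %x\n", tx_cmd_protection(0, CTL_AMPDU, true));
	printf("plain: %x\n", tx_cmd_protection(0, 0, true));
	return 0;
}

The rename from use_rts_for_ht to use_rts_for_aggregation reflects that the flag only matters once an aggregation session is operational, which is also why iwl_mac_ampdu_action now toggles the link-quality RTS bit per station instead of rewriting RXON.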
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index e9d23f2..4a71dfb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -104,8 +104,9 @@
u32 min_average_noise,
u8 default_chain);
void (*chain_noise_reset)(struct iwl_priv *priv);
- void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
- __le32 *tx_flags);
+ void (*tx_cmd_protection)(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags);
int (*calc_rssi)(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp);
void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
@@ -249,7 +250,7 @@
* @led_compensation: compensate on the led on/off time per HW according
* to the deviation to achieve the desired led frequency.
* The detail algorithm is described in iwl-led.c
- * @use_rts_for_ht: use rts/cts protection for HT traffic
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
* @chain_noise_num_beacons: number of beacons used to compute chain noise
* @adv_thermal_throttle: support advance thermal throttle
* @support_ct_kill_exit: support ct kill exit condition
@@ -318,7 +319,7 @@
const bool ht_greenfield_support;
u16 led_compensation;
const bool broken_powersave;
- bool use_rts_for_ht;
+ bool use_rts_for_aggregation;
int chain_noise_num_beacons;
const bool supports_idle;
bool adv_thermal_throttle;
@@ -390,8 +391,9 @@
void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags);
+void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_free_traffic_mem(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d24eb47..70c4b8f 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -435,10 +435,7 @@
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
}
- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-
- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
if (ieee80211_is_mgmt(fc)) {
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 2372abb..3e82f16 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 71a101f..822f8dc 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -43,8 +43,6 @@
{ PCI_DEVICE(0x1260, 0x3886) },
/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
{ PCI_DEVICE(0x1260, 0xffff) },
- /* Standard Microsystems Corp SMC2802W Wireless PCI */
- { PCI_DEVICE(0x10b8, 0x2802) },
{ },
};
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index a75ed30..8e4153d 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -386,7 +386,7 @@
struct chbk *p_ch;
CLAW_DBF_TEXT(4, trace, "claw_tx");
- p_ch=&privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
rc=claw_hw_tx( skb, dev, 1 );
spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -407,7 +407,7 @@
claw_pack_skb(struct claw_privbk *privptr)
{
struct sk_buff *new_skb,*held_skb;
- struct chbk *p_ch = &privptr->channel[WRITE];
+ struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
struct claw_env *p_env = privptr->p_env;
int pkt_cnt,pk_ind,so_far;
@@ -515,15 +515,15 @@
privptr->p_env->write_size=CLAW_FRAME_SIZE;
}
claw_set_busy(dev);
- tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
- (unsigned long) &privptr->channel[READ]);
+ tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
+ (unsigned long) &privptr->channel[READ_CHANNEL]);
for ( i = 0; i < 2; i++) {
CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
init_waitqueue_head(&privptr->channel[i].wait);
/* skb_queue_head_init(&p_ch->io_queue); */
- if (i == WRITE)
+ if (i == WRITE_CHANNEL)
skb_queue_head_init(
- &privptr->channel[WRITE].collect_queue);
+ &privptr->channel[WRITE_CHANNEL].collect_queue);
privptr->channel[i].flag_a = 0;
privptr->channel[i].IO_active = 0;
privptr->channel[i].flag &= ~CLAW_TIMER;
@@ -551,12 +551,12 @@
if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
del_timer(&timer);
}
- if ((((privptr->channel[READ].last_dstat |
- privptr->channel[WRITE].last_dstat) &
+ if ((((privptr->channel[READ_CHANNEL].last_dstat |
+ privptr->channel[WRITE_CHANNEL].last_dstat) &
~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
- (((privptr->channel[READ].flag |
- privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
- dev_info(&privptr->channel[READ].cdev->dev,
+ (((privptr->channel[READ_CHANNEL].flag |
+ privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
+ dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
"%s: remote side is not ready\n", dev->name);
CLAW_DBF_TEXT(2, trace, "notrdy");
@@ -608,8 +608,8 @@
}
}
privptr->buffs_alloc = 0;
- privptr->channel[READ].flag= 0x00;
- privptr->channel[WRITE].flag = 0x00;
+ privptr->channel[READ_CHANNEL].flag = 0x00;
+ privptr->channel[WRITE_CHANNEL].flag = 0x00;
privptr->p_buff_ccw=NULL;
privptr->p_buff_read=NULL;
privptr->p_buff_write=NULL;
@@ -652,10 +652,10 @@
}
/* Try to extract channel from driver data. */
- if (privptr->channel[READ].cdev == cdev)
- p_ch = &privptr->channel[READ];
- else if (privptr->channel[WRITE].cdev == cdev)
- p_ch = &privptr->channel[WRITE];
+ if (privptr->channel[READ_CHANNEL].cdev == cdev)
+ p_ch = &privptr->channel[READ_CHANNEL];
+ else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
+ p_ch = &privptr->channel[WRITE_CHANNEL];
else {
dev_warn(&cdev->dev, "The device is not a CLAW device\n");
CLAW_DBF_TEXT(2, trace, "badchan");
@@ -813,7 +813,7 @@
claw_clearbit_busy(TB_TX, dev);
claw_clear_busy(dev);
}
- p_ch_r = (struct chbk *)&privptr->channel[READ];
+ p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
if (test_and_set_bit(CLAW_BH_ACTIVE,
(void *)&p_ch_r->flag_a) == 0)
tasklet_schedule(&p_ch_r->tasklet);
@@ -878,13 +878,13 @@
for ( i = 1; i >=0 ; i--) {
spin_lock_irqsave(
get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
- /* del_timer(&privptr->channel[READ].timer); */
+ /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
privptr->channel[i].claw_state = CLAW_STOP;
privptr->channel[i].IO_active = 0;
parm = (unsigned long) &privptr->channel[i];
- if (i == WRITE)
+ if (i == WRITE_CHANNEL)
claw_purge_skb_queue(
- &privptr->channel[WRITE].collect_queue);
+ &privptr->channel[WRITE_CHANNEL].collect_queue);
rc = ccw_device_halt (privptr->channel[i].cdev, parm);
if (privptr->system_validate_comp==0x00) /* never opened? */
init_waitqueue_head(&privptr->channel[i].wait);
@@ -971,16 +971,16 @@
privptr->mtc_skipping = 1;
privptr->mtc_offset=0;
- if (((privptr->channel[READ].last_dstat |
- privptr->channel[WRITE].last_dstat) &
+ if (((privptr->channel[READ_CHANNEL].last_dstat |
+ privptr->channel[WRITE_CHANNEL].last_dstat) &
~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
- dev_warn(&privptr->channel[READ].cdev->dev,
+ dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
"Deactivating %s completed with incorrect"
" subchannel status "
"(read %02x, write %02x)\n",
dev->name,
- privptr->channel[READ].last_dstat,
- privptr->channel[WRITE].last_dstat);
+ privptr->channel[READ_CHANNEL].last_dstat,
+ privptr->channel[WRITE_CHANNEL].last_dstat);
CLAW_DBF_TEXT(2, trace, "badclose");
}
CLAW_DBF_TEXT(4, trace, "rlsexit");
@@ -1324,7 +1324,7 @@
CLAW_DBF_TEXT(4, trace, "hw_tx");
privptr = (struct claw_privbk *)(dev->ml_priv);
- p_ch=(struct chbk *)&privptr->channel[WRITE];
+ p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
p_env =privptr->p_env;
claw_free_wrt_buf(dev); /* Clean up free chain if posible */
/* scan the write queue to free any completed write packets */
@@ -1357,7 +1357,7 @@
claw_strt_out_IO(dev );
claw_free_wrt_buf( dev );
if (privptr->write_free_count==0) {
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
goto Done;
@@ -1369,7 +1369,7 @@
}
/* tx lock */
if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
claw_strt_out_IO(dev );
@@ -1385,7 +1385,7 @@
privptr->p_write_free_chain == NULL ) {
claw_setbit_busy(TB_NOBUFFER,dev);
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
CLAW_DBF_TEXT(2, trace, "clawbusy");
@@ -1397,7 +1397,7 @@
while (len_of_data > 0) {
p_this_ccw=privptr->p_write_free_chain; /* get a block */
if (p_this_ccw == NULL) { /* lost the race */
- ch = &privptr->channel[WRITE];
+ ch = &privptr->channel[WRITE_CHANNEL];
atomic_inc(&skb->users);
skb_queue_tail(&ch->collect_queue, skb);
goto Done2;
@@ -2067,7 +2067,7 @@
*catch up to each other */
privptr = dev->ml_priv;
p_env=privptr->p_env;
- tdev = &privptr->channel[READ].cdev->dev;
+ tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
memcpy( &temp_host_name, p_env->host_name, 8);
memcpy( &temp_ws_name, p_env->adapter_name , 8);
dev_info(tdev, "%s: CLAW device %.8s: "
@@ -2245,7 +2245,7 @@
dev->name, temp_ws_name,
p_ctlbk->linkid);
privptr->active_link_ID = p_ctlbk->linkid;
- p_ch = &privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
break;
case CONNECTION_RESPONSE:
@@ -2296,7 +2296,7 @@
"%s: Confirmed Now packing\n", dev->name);
p_env->packing = DO_PACKED;
}
- p_ch = &privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
wake_up(&p_ch->wait);
} else {
dev_warn(tdev, "Activating %s failed because of"
@@ -2556,7 +2556,7 @@
p_packd=NULL;
privptr = dev->ml_priv;
- p_dev = &privptr->channel[READ].cdev->dev;
+ p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
p_env = privptr->p_env;
p_this_ccw=privptr->p_read_active_first;
while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
@@ -2728,7 +2728,7 @@
struct ccwbk*p_ccwbk;
struct chbk *p_ch;
struct clawh *p_clawh;
- p_ch=&privptr->channel[READ];
+ p_ch = &privptr->channel[READ_CHANNEL];
CLAW_DBF_TEXT(4, trace, "StRdNter");
p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
@@ -2782,7 +2782,7 @@
return;
}
privptr = (struct claw_privbk *)dev->ml_priv;
- p_ch=&privptr->channel[WRITE];
+ p_ch = &privptr->channel[WRITE_CHANNEL];
CLAW_DBF_TEXT(4, trace, "strt_io");
p_first_ccw=privptr->p_write_active_first;
@@ -2875,7 +2875,7 @@
if (dev->flags & IFF_RUNNING)
claw_release(dev);
if (privptr) {
- privptr->channel[READ].ndev = NULL; /* say it's free */
+ privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
}
dev->ml_priv = NULL;
#ifdef MODULE
@@ -2960,18 +2960,18 @@
struct ccw_dev_id dev_id;
dev_info(&cgdev->dev, "add for %s\n",
- dev_name(&cgdev->cdev[READ]->dev));
+ dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
CLAW_DBF_TEXT(2, setup, "new_dev");
privptr = dev_get_drvdata(&cgdev->dev);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
if (!privptr)
return -ENODEV;
p_env = privptr->p_env;
- ccw_device_get_id(cgdev->cdev[READ], &dev_id);
- p_env->devno[READ] = dev_id.devno;
- ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
- p_env->devno[WRITE] = dev_id.devno;
+ ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
+ p_env->devno[READ_CHANNEL] = dev_id.devno;
+ ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
+ p_env->devno[WRITE_CHANNEL] = dev_id.devno;
ret = add_channel(cgdev->cdev[0],0,privptr);
if (ret == 0)
ret = add_channel(cgdev->cdev[1],1,privptr);
@@ -2980,14 +2980,14 @@
" failed with error code %d\n", ret);
goto out;
}
- ret = ccw_device_set_online(cgdev->cdev[READ]);
+ ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
if (ret != 0) {
dev_warn(&cgdev->dev,
"Setting the read subchannel online"
" failed with error code %d\n", ret);
goto out;
}
- ret = ccw_device_set_online(cgdev->cdev[WRITE]);
+ ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
if (ret != 0) {
dev_warn(&cgdev->dev,
"Setting the write subchannel online "
@@ -3002,8 +3002,8 @@
}
dev->ml_priv = privptr;
dev_set_drvdata(&cgdev->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
/* sysfs magic */
SET_NETDEV_DEV(dev, &cgdev->dev);
if (register_netdev(dev) != 0) {
@@ -3021,16 +3021,16 @@
goto out;
}
}
- privptr->channel[READ].ndev = dev;
- privptr->channel[WRITE].ndev = dev;
+ privptr->channel[READ_CHANNEL].ndev = dev;
+ privptr->channel[WRITE_CHANNEL].ndev = dev;
privptr->p_env->ndev = dev;
dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
dev->name, p_env->read_size,
p_env->write_size, p_env->read_buffers,
- p_env->write_buffers, p_env->devno[READ],
- p_env->devno[WRITE]);
+ p_env->write_buffers, p_env->devno[READ_CHANNEL],
+ p_env->devno[WRITE_CHANNEL]);
dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
":%.8s api_type: %.8s\n",
dev->name, p_env->host_name,
@@ -3072,10 +3072,10 @@
priv = dev_get_drvdata(&cgdev->dev);
if (!priv)
return -ENODEV;
- ndev = priv->channel[READ].ndev;
+ ndev = priv->channel[READ_CHANNEL].ndev;
if (ndev) {
/* Close the device */
- dev_info(&cgdev->dev, "%s: shutting down \n",
+ dev_info(&cgdev->dev, "%s: shutting down\n",
ndev->name);
if (ndev->flags & IFF_RUNNING)
ret = claw_release(ndev);
@@ -3083,8 +3083,8 @@
unregister_netdev(ndev);
ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
claw_free_netdevice(ndev, 1);
- priv->channel[READ].ndev = NULL;
- priv->channel[WRITE].ndev = NULL;
+ priv->channel[READ_CHANNEL].ndev = NULL;
+ priv->channel[WRITE_CHANNEL].ndev = NULL;
priv->p_env->ndev = NULL;
}
ccw_device_set_offline(cgdev->cdev[1]);
@@ -3115,8 +3115,8 @@
priv->channel[1].irb=NULL;
kfree(priv);
dev_set_drvdata(&cgdev->dev, NULL);
- dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
- dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
put_device(&cgdev->dev);
return;
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 46d59a13..1bc5904 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -74,8 +74,8 @@
#define MAX_ENVELOPE_SIZE 65536
#define CLAW_DEFAULT_MTU_SIZE 4096
#define DEF_PACK_BUFSIZE 32768
-#define READ 0
-#define WRITE 1
+#define READ_CHANNEL 0
+#define WRITE_CHANNEL 1
#define TB_TX 0 /* sk buffer handling in process */
#define TB_STOP 1 /* network device stop in process */
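The READ/WRITE constants removed above collided with the kernel-wide READ and WRITE macros, so claw now indexes its channel pair with driver-local names (the ctcm hunks below get the same treatment with CTCM_READ/CTCM_WRITE). A trivial standalone illustration of the pattern, with an illustrative struct:

#include <stdio.h>

enum { READ_CHANNEL = 0, WRITE_CHANNEL = 1 };

struct chbk { const char *role; };	/* contents illustrative only */

int main(void)
{
	struct chbk channel[2] = {
		[READ_CHANNEL]	= { "read subchannel" },
		[WRITE_CHANNEL] = { "write subchannel" },
	};

	printf("%s / %s\n", channel[READ_CHANNEL].role,
	       channel[WRITE_CHANNEL].role);
	return 0;
}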
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 70eb7f1..8c921fc 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -454,7 +454,7 @@
if ((fsmstate == CTC_STATE_SETUPWAIT) &&
(ch->protocol == CTCM_PROTO_OS390)) {
/* OS/390 resp. z/OS */
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
CTC_EVENT_TIMER, ch);
@@ -472,14 +472,14 @@
* if in compatibility mode, since VM TCP delays the initial
* frame until it has some data to send.
*/
- if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
+ if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
(ch->protocol != CTCM_PROTO_S390))
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
ch->ccw[1].count = 2; /* Transfer only length */
- fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
rc = ccw_device_start(ch->cdev, &ch->ccw[0],
(unsigned long)ch, 0xff, 0);
@@ -495,7 +495,7 @@
* reply from VM TCP which brings up the RX channel to it's
* final state.
*/
- if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
+ if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
(ch->protocol == CTCM_PROTO_S390)) {
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
@@ -600,15 +600,15 @@
int rc;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
- CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ CTCM_FUNTAIL, ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
if (ch->trans_skb != NULL) {
clear_normalized_cda(&ch->ccw[1]);
dev_kfree_skb(ch->trans_skb);
ch->trans_skb = NULL;
}
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ch->ccw[1].cmd_code = CCW_CMD_READ;
ch->ccw[1].flags = CCW_FLAG_SLI;
ch->ccw[1].count = 0;
@@ -622,7 +622,8 @@
"%s(%s): %s trans_skb alloc delayed "
"until first transfer",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
}
ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
@@ -720,7 +721,7 @@
ch->th_seg = 0x00;
ch->th_seq_num = 0x00;
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
skb_queue_purge(&ch->io_queue);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -799,7 +800,8 @@
fsm_newstate(fi, CTC_STATE_STARTRETRY);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
- if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
+ if (!IS_MPC(ch) &&
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc,
@@ -811,10 +813,10 @@
CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
"%s(%s) : %s error during %s channel setup state=%s\n",
CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
fsm_getstate_str(fi));
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -945,7 +947,7 @@
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
fsm_newstate(fi, CTC_STATE_DTERM);
- ch2 = priv->channel[WRITE];
+ ch2 = priv->channel[CTCM_WRITE];
fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
ccw_device_halt(ch->cdev, (unsigned long)ch);
@@ -1074,13 +1076,13 @@
fsm_deltimer(&ch->timer);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s: %s: %s unrecoverable channel error",
- CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX");
+ CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
if (IS_MPC(ch)) {
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
}
- if (rd == READ) {
+ if (rd == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else {
@@ -1503,7 +1505,7 @@
switch (fsm_getstate(fi)) {
case CTC_STATE_STARTRETRY:
case CTC_STATE_SETUPWAIT:
- if (CHANNEL_DIRECTION(ch->flags) == READ) {
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ctcmpc_chx_rxidle(fi, event, arg);
} else {
fsm_newstate(fi, CTC_STATE_TXIDLE);
@@ -1514,7 +1516,7 @@
break;
};
- fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
done:
@@ -1753,8 +1755,8 @@
struct net_device *dev = ach->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *wch = priv->channel[WRITE];
- struct channel *rch = priv->channel[READ];
+ struct channel *wch = priv->channel[CTCM_WRITE];
+ struct channel *rch = priv->channel[CTCM_READ];
struct sk_buff *skb;
struct th_sweep *header;
int rc = 0;
@@ -2070,7 +2072,7 @@
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
if (IS_MPC(priv))
priv->mpcg->channels_terminating = 0;
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_START, ch);
}
@@ -2092,7 +2094,7 @@
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
ch->th_seq_num = 0x00;
@@ -2183,11 +2185,11 @@
if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXUP)
- mpc_channel_action(priv->channel[READ],
- READ, MPC_CHANNEL_ADD);
+ mpc_channel_action(priv->channel[CTCM_READ],
+ CTCM_READ, MPC_CHANNEL_ADD);
else
- mpc_channel_action(priv->channel[WRITE],
- WRITE, MPC_CHANNEL_ADD);
+ mpc_channel_action(priv->channel[CTCM_WRITE],
+ CTCM_WRITE, MPC_CHANNEL_ADD);
}
}
@@ -2239,11 +2241,11 @@
}
if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXDOWN)
- mpc_channel_action(priv->channel[READ],
- READ, MPC_CHANNEL_REMOVE);
+ mpc_channel_action(priv->channel[CTCM_READ],
+ CTCM_READ, MPC_CHANNEL_REMOVE);
else
- mpc_channel_action(priv->channel[WRITE],
- WRITE, MPC_CHANNEL_REMOVE);
+ mpc_channel_action(priv->channel[CTCM_WRITE],
+ CTCM_WRITE, MPC_CHANNEL_REMOVE);
}
}
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 4ecafbf..6edf20b 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -267,7 +267,7 @@
else {
ch->flags |= CHANNEL_FLAGS_INUSE;
ch->flags &= ~CHANNEL_FLAGS_RWMASK;
- ch->flags |= (direction == WRITE)
+ ch->flags |= (direction == CTCM_WRITE)
? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
}
@@ -388,7 +388,8 @@
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s trans_skb allocation error",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
return -ENOMEM;
}
@@ -399,7 +400,8 @@
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s set norm_cda failed",
CTCM_FUNTAIL, ch->id,
- (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
return -ENOMEM;
}
@@ -603,14 +605,14 @@
priv = dev->ml_priv;
grp = priv->mpcg;
- ch = priv->channel[WRITE];
+ ch = priv->channel[CTCM_WRITE];
/* sweep processing is not complete until response and request */
/* has completed for all read channels in group */
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
- grp->sweep_rsp_pend_num = grp->active_channels[READ];
- grp->sweep_req_pend_num = grp->active_channels[READ];
+ grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+ grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
}
sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
@@ -911,7 +913,7 @@
return NETDEV_TX_BUSY;
dev->trans_start = jiffies;
- if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
+ if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
}
@@ -994,7 +996,7 @@
}
dev->trans_start = jiffies;
- if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) {
+ if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device error - dropped",
CTCM_FUNTAIL, dev->name);
@@ -1035,7 +1037,7 @@
return -EINVAL;
priv = dev->ml_priv;
- max_bufsize = priv->channel[READ]->max_bufsize;
+ max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
if (IS_MPC(priv)) {
if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
@@ -1226,10 +1228,10 @@
priv = dev_get_drvdata(&cgdev->dev);
/* Try to extract channel from driver data. */
- if (priv->channel[READ]->cdev == cdev)
- ch = priv->channel[READ];
- else if (priv->channel[WRITE]->cdev == cdev)
- ch = priv->channel[WRITE];
+ if (priv->channel[CTCM_READ]->cdev == cdev)
+ ch = priv->channel[CTCM_READ];
+ else if (priv->channel[CTCM_WRITE]->cdev == cdev)
+ ch = priv->channel[CTCM_WRITE];
else {
dev_err(&cdev->dev,
"%s: Internal error: Can't determine channel for "
@@ -1587,13 +1589,13 @@
goto out_ccw2;
}
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
priv->channel[direction] =
- channel_get(type, direction == READ ? read_id : write_id,
- direction);
+ channel_get(type, direction == CTCM_READ ?
+ read_id : write_id, direction);
if (priv->channel[direction] == NULL) {
- if (direction == WRITE)
- channel_free(priv->channel[READ]);
+ if (direction == CTCM_WRITE)
+ channel_free(priv->channel[CTCM_READ]);
goto out_dev;
}
priv->channel[direction]->netdev = dev;
@@ -1617,13 +1619,13 @@
dev_info(&dev->dev,
"setup OK : r/w = %s/%s, protocol : %d\n",
- priv->channel[READ]->id,
- priv->channel[WRITE]->id, priv->protocol);
+ priv->channel[CTCM_READ]->id,
+ priv->channel[CTCM_WRITE]->id, priv->protocol);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
- priv->channel[READ]->id,
- priv->channel[WRITE]->id, priv->protocol);
+ priv->channel[CTCM_READ]->id,
+ priv->channel[CTCM_WRITE]->id, priv->protocol);
return 0;
out_unregister:
@@ -1635,10 +1637,10 @@
out_ccw1:
ccw_device_set_offline(cgdev->cdev[0]);
out_remove_channel2:
- readc = channel_get(type, read_id, READ);
+ readc = channel_get(type, read_id, CTCM_READ);
channel_remove(readc);
out_remove_channel1:
- writec = channel_get(type, write_id, WRITE);
+ writec = channel_get(type, write_id, CTCM_WRITE);
channel_remove(writec);
out_err_result:
return result;
@@ -1660,19 +1662,19 @@
if (!priv)
return -ENODEV;
- if (priv->channel[READ]) {
- dev = priv->channel[READ]->netdev;
+ if (priv->channel[CTCM_READ]) {
+ dev = priv->channel[CTCM_READ]->netdev;
CTCM_DBF_DEV(SETUP, dev, "");
/* Close the device */
ctcm_close(dev);
dev->flags &= ~IFF_RUNNING;
ctcm_remove_attributes(&cgdev->dev);
- channel_free(priv->channel[READ]);
+ channel_free(priv->channel[CTCM_READ]);
} else
dev = NULL;
- if (priv->channel[WRITE])
- channel_free(priv->channel[WRITE]);
+ if (priv->channel[CTCM_WRITE])
+ channel_free(priv->channel[CTCM_WRITE]);
if (dev) {
unregister_netdev(dev);
@@ -1685,11 +1687,11 @@
ccw_device_set_offline(cgdev->cdev[1]);
ccw_device_set_offline(cgdev->cdev[0]);
- if (priv->channel[READ])
- channel_remove(priv->channel[READ]);
- if (priv->channel[WRITE])
- channel_remove(priv->channel[WRITE]);
- priv->channel[READ] = priv->channel[WRITE] = NULL;
+ if (priv->channel[CTCM_READ])
+ channel_remove(priv->channel[CTCM_READ]);
+ if (priv->channel[CTCM_WRITE])
+ channel_remove(priv->channel[CTCM_WRITE]);
+ priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
return 0;
@@ -1720,11 +1722,11 @@
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- netif_device_detach(priv->channel[READ]->netdev);
- ctcm_close(priv->channel[READ]->netdev);
+ netif_device_detach(priv->channel[CTCM_READ]->netdev);
+ ctcm_close(priv->channel[CTCM_READ]->netdev);
if (!wait_event_timeout(priv->fsm->wait_q,
fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
- netif_device_attach(priv->channel[READ]->netdev);
+ netif_device_attach(priv->channel[CTCM_READ]->netdev);
return -EBUSY;
}
ccw_device_set_offline(gdev->cdev[1]);
@@ -1745,9 +1747,9 @@
rc = ccw_device_set_online(gdev->cdev[0]);
if (rc)
goto err_out;
- ctcm_open(priv->channel[READ]->netdev);
+ ctcm_open(priv->channel[CTCM_READ]->netdev);
err_out:
- netif_device_attach(priv->channel[READ]->netdev);
+ netif_device_attach(priv->channel[CTCM_READ]->netdev);
return rc;
}
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d34fa14..24d5215 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -111,8 +111,8 @@
#define CTCM_INITIAL_BLOCKLEN 2
-#define READ 0
-#define WRITE 1
+#define CTCM_READ 0
+#define CTCM_WRITE 1
#define CTCM_ID_SIZE 20+3
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 87c24d2..2861e78 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -419,8 +419,8 @@
return;
priv = dev->ml_priv;
grp = priv->mpcg;
- rch = priv->channel[READ];
- wch = priv->channel[WRITE];
+ rch = priv->channel[CTCM_READ];
+ wch = priv->channel[CTCM_WRITE];
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"%s(%s): state=%s",
@@ -578,7 +578,7 @@
"%s: %s: flowc = %d",
CTCM_FUNTAIL, dev->name, flowc);
- rch = priv->channel[READ];
+ rch = priv->channel[CTCM_READ];
mpcg_state = fsm_getstate(grp->fsm);
switch (flowc) {
@@ -622,7 +622,7 @@
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -656,7 +656,7 @@
int rc = 0;
struct th_sweep *header;
struct sk_buff *sweep_skb;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
@@ -712,7 +712,7 @@
struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
- struct channel *ch = priv->channel[WRITE];
+ struct channel *ch = priv->channel[CTCM_WRITE];
if (do_debug)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
@@ -721,8 +721,8 @@
if (grp->in_sweep == 0) {
grp->in_sweep = 1;
ctcm_test_and_set_busy(dev);
- grp->sweep_req_pend_num = grp->active_channels[READ];
- grp->sweep_rsp_pend_num = grp->active_channels[READ];
+ grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+ grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
}
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -906,14 +906,14 @@
fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* Put up a read on the channel */
- ch = priv->channel[READ];
+ ch = priv->channel[CTCM_READ];
ch->pdu_seq = 0;
CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
__func__, ch->pdu_seq);
ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
/* Put the write channel in idle state */
- ch = priv->channel[WRITE];
+ ch = priv->channel[CTCM_WRITE];
if (ch->collect_len > 0) {
spin_lock(&ch->collect_lock);
ctcm_purge_skb_queue(&ch->collect_queue);
@@ -960,7 +960,8 @@
"%s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
- grp->active_channels[READ], grp->active_channels[WRITE]);
+ grp->active_channels[CTCM_READ],
+ grp->active_channels[CTCM_WRITE]);
if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
grp->num_channel_paths++;
@@ -994,10 +995,11 @@
grp->xid_skb->data,
grp->xid_skb->len);
- ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ)
+ ch->xid->xid2_dlc_type =
+ ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? XID2_READ_SIDE : XID2_WRITE_SIDE);
- if (CHANNEL_DIRECTION(ch->flags) == WRITE)
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
ch->xid->xid2_buf_len = 0x00;
ch->xid_skb->data = ch->xid_skb_data;
@@ -1006,8 +1008,8 @@
fsm_newstate(ch->fsm, CH_XID0_PENDING);
- if ((grp->active_channels[READ] > 0) &&
- (grp->active_channels[WRITE] > 0) &&
+ if ((grp->active_channels[CTCM_READ] > 0) &&
+ (grp->active_channels[CTCM_WRITE] > 0) &&
(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
@@ -1027,10 +1029,10 @@
if (grp->channels_terminating)
goto done;
- if (((grp->active_channels[READ] == 0) &&
- (grp->active_channels[WRITE] > 0))
- || ((grp->active_channels[WRITE] == 0) &&
- (grp->active_channels[READ] > 0)))
+ if (((grp->active_channels[CTCM_READ] == 0) &&
+ (grp->active_channels[CTCM_WRITE] > 0))
+ || ((grp->active_channels[CTCM_WRITE] == 0) &&
+ (grp->active_channels[CTCM_READ] > 0)))
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
done:
@@ -1038,7 +1040,8 @@
"exit %s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths,
- grp->active_channels[READ], grp->active_channels[WRITE]);
+ grp->active_channels[CTCM_READ],
+ grp->active_channels[CTCM_WRITE]);
CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
}
@@ -1392,8 +1395,8 @@
(grp->port_persist == 0))
fsm_deltimer(&priv->restart_timer);
- wch = priv->channel[WRITE];
- rch = priv->channel[READ];
+ wch = priv->channel[CTCM_WRITE];
+ rch = priv->channel[CTCM_READ];
switch (grp->saved_state) {
case MPCG_STATE_RESET:
@@ -1480,8 +1483,8 @@
priv = dev->ml_priv;
grp = priv->mpcg;
- wch = priv->channel[WRITE];
- rch = priv->channel[READ];
+ wch = priv->channel[CTCM_WRITE];
+ rch = priv->channel[CTCM_READ];
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW:
@@ -1586,7 +1589,7 @@
CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
/*the received direction should be the opposite of ours */
- if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE :
+ if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
XID2_READ_SIDE) != xid->xid2_dlc_type) {
rc = 2;
/* XID REJECTED: r/w channel pairing mismatch */
@@ -1912,7 +1915,7 @@
if (grp == NULL)
return;
- for (direction = READ; direction <= WRITE; direction++) {
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction];
struct xid2 *thisxid = ch->xid;
ch->xid_skb->data = ch->xid_skb_data;
@@ -2152,14 +2155,15 @@
return -ENOMEM;
}
- *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq;
- priv->channel[READ]->pdu_seq++;
+ *((__u32 *)skb_push(skb, 4)) =
+ priv->channel[CTCM_READ]->pdu_seq;
+ priv->channel[CTCM_READ]->pdu_seq++;
CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
- __func__, priv->channel[READ]->pdu_seq);
+ __func__, priv->channel[CTCM_READ]->pdu_seq);
/* receipt of CC03 resets anticipated sequence number on
receiving side */
- priv->channel[READ]->pdu_seq = 0x00;
+ priv->channel[CTCM_READ]->pdu_seq = 0x00;
skb_reset_mac_header(skb);
skb->dev = dev;
skb->protocol = htons(ETH_P_SNAP);
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 2b24550..8305319 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -38,8 +38,8 @@
int bs1;
struct ctcm_priv *priv = dev_get_drvdata(dev);
- if (!(priv && priv->channel[READ] &&
- (ndev = priv->channel[READ]->netdev))) {

+ if (!(priv && priv->channel[CTCM_READ] &&
+ (ndev = priv->channel[CTCM_READ]->netdev))) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV;
}
@@ -55,12 +55,12 @@
(bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
goto einval;
- priv->channel[READ]->max_bufsize = bs1;
- priv->channel[WRITE]->max_bufsize = bs1;
+ priv->channel[CTCM_READ]->max_bufsize = bs1;
+ priv->channel[CTCM_WRITE]->max_bufsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
- priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
- priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
CTCM_DBF_DEV(SETUP, ndev, buf);
return count;
@@ -85,9 +85,9 @@
p += sprintf(p, " Device FSM state: %s\n",
fsm_getstate_str(priv->fsm));
p += sprintf(p, " RX channel FSM state: %s\n",
- fsm_getstate_str(priv->channel[READ]->fsm));
+ fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
p += sprintf(p, " TX channel FSM state: %s\n",
- fsm_getstate_str(priv->channel[WRITE]->fsm));
+ fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
p += sprintf(p, " Max. TX buffer used: %ld\n",
priv->channel[WRITE]->prof.maxmulti);
p += sprintf(p, " Max. chained SKBs: %ld\n",
@@ -102,7 +102,7 @@
priv->channel[WRITE]->prof.tx_time);
printk(KERN_INFO "Statistics for %s:\n%s",
- priv->channel[WRITE]->netdev->name, sbuf);
+ priv->channel[CTCM_WRITE]->netdev->name, sbuf);
kfree(sbuf);
return;
}
@@ -125,7 +125,7 @@
return -ENODEV;
/* Reset statistics */
memset(&priv->channel[WRITE]->prof, 0,
- sizeof(priv->channel[WRITE]->prof));
+ sizeof(priv->channel[CTCM_WRITE]->prof));
return count;
}
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index ffea35c..0d5eeadf 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -31,21 +31,20 @@
* allocate a cell record and fill in its name, VL server address list and
* allocate an anonymous key
*/
-static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
+static struct afs_cell *afs_cell_alloc(const char *name, unsigned namelen,
+ char *vllist)
{
struct afs_cell *cell;
struct key *key;
- size_t namelen;
char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next;
char *dvllist = NULL, *_vllist = NULL;
char delimiter = ':';
int ret;
- _enter("%s,%s", name, vllist);
+ _enter("%*.*s,%s", namelen, namelen, name ?: "", vllist);
BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */
- namelen = strlen(name);
if (namelen > AFS_MAXCELLNAME) {
_leave(" = -ENAMETOOLONG");
return ERR_PTR(-ENAMETOOLONG);
@@ -73,6 +72,10 @@
if (!vllist || strlen(vllist) < 7) {
ret = dns_query("afsdb", name, namelen, "ipv4", &dvllist, NULL);
if (ret < 0) {
+ if (ret == -ENODATA || ret == -EAGAIN || ret == -ENOKEY)
+ /* translate these errors into something
+ * userspace might understand */
+ ret = -EDESTADDRREQ;
_leave(" = %d", ret);
return ERR_PTR(ret);
}
@@ -138,26 +141,29 @@
}
/*
- * create a cell record
- * - "name" is the name of the cell
- * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ * afs_cell_create() - create a cell record
+ * @name: is the name of the cell.
+ * @namesz: is the strlen of the cell name.
+ * @vllist: is a colon separated list of IP addresses in "a.b.c.d" format.
+ * @retref: is T to return the cell reference when the cell exists.
*/
-struct afs_cell *afs_cell_create(const char *name, char *vllist)
+struct afs_cell *afs_cell_create(const char *name, unsigned namesz,
+ char *vllist, bool retref)
{
struct afs_cell *cell;
int ret;
- _enter("%s,%s", name, vllist);
+ _enter("%*.*s,%s", namesz, namesz, name ?: "", vllist);
down_write(&afs_cells_sem);
read_lock(&afs_cells_lock);
list_for_each_entry(cell, &afs_cells, link) {
- if (strcasecmp(cell->name, name) == 0)
+ if (strncasecmp(cell->name, name, namesz) == 0)
goto duplicate_name;
}
read_unlock(&afs_cells_lock);
- cell = afs_cell_alloc(name, vllist);
+ cell = afs_cell_alloc(name, namesz, vllist);
if (IS_ERR(cell)) {
_leave(" = %ld", PTR_ERR(cell));
up_write(&afs_cells_sem);
@@ -197,8 +203,18 @@
return ERR_PTR(ret);
duplicate_name:
+ if (retref && !IS_ERR(cell))
+ afs_get_cell(cell);
+
read_unlock(&afs_cells_lock);
up_write(&afs_cells_sem);
+
+ if (retref) {
+ _leave(" = %p", cell);
+ return cell;
+ }
+
+ _leave(" = -EEXIST");
return ERR_PTR(-EEXIST);
}
@@ -229,7 +245,7 @@
*cp++ = 0;
/* allocate a cell record for the root cell */
- new_root = afs_cell_create(rootcell, cp);
+ new_root = afs_cell_create(rootcell, strlen(rootcell), cp, false);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
return PTR_ERR(new_root);
@@ -249,11 +265,12 @@
/*
* lookup a cell record
*/
-struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
+struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz,
+ bool dns_cell)
{
struct afs_cell *cell;
- _enter("\"%*.*s\",", namesz, namesz, name ? name : "");
+ _enter("\"%*.*s\",", namesz, namesz, name ?: "");
down_read(&afs_cells_sem);
read_lock(&afs_cells_lock);
@@ -267,6 +284,8 @@
}
}
cell = ERR_PTR(-ENOENT);
+ if (dns_cell)
+ goto create_cell;
found:
;
} else {
@@ -289,6 +308,15 @@
up_read(&afs_cells_sem);
_leave(" = %p", cell);
return cell;
+
+create_cell:
+ read_unlock(&afs_cells_lock);
+ up_read(&afs_cells_sem);
+
+ cell = afs_cell_create(name, namesz, NULL, true);
+
+ _leave(" = %p", cell);
+ return cell;
}
#if 0
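The cell.c hunks above let afs_cell_alloc() fall back to an AFSDB DNS query when no VL-server list is supplied, and fold the resolver's internal failures into a single errno userspace can act on. A standalone sketch of just the error translation, using the Linux errno values the diff names (the query itself is a stub):

#include <errno.h>
#include <stdio.h>

static int dns_query_stub(void)
{
	return -ENODATA;		/* pretend the AFSDB lookup found nothing */
}

static int lookup_vlservers(void)
{
	int ret = dns_query_stub();

	if (ret == -ENODATA || ret == -EAGAIN || ret == -ENOKEY)
		ret = -EDESTADDRREQ;	/* one error userspace understands */
	return ret;
}

int main(void)
{
	printf("lookup -> %d\n", lookup_vlservers());
	return 0;
}

Passing namelen explicitly, instead of calling strlen() inside afs_cell_alloc(), also lets afs_cell_lookup() hand in an unterminated name straight from a dentry when dns_cell is set.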
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index b42d5cc..0d38c09 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -477,6 +477,40 @@
}
/*
+ * Try to auto-mount the mountpoint with a pseudo directory if the autocell
+ * operation is set.
+ */
+static struct inode *afs_try_auto_mntpt(
+ int ret, struct dentry *dentry, struct inode *dir, struct key *key,
+ struct afs_fid *fid)
+{
+ const char *devname = dentry->d_name.name;
+ struct afs_vnode *vnode = AFS_FS_I(dir);
+ struct inode *inode;
+
+ _enter("%d, %p{%s}, {%x:%u}, %p",
+ ret, dentry, devname, vnode->fid.vid, vnode->fid.vnode, key);
+
+ if (ret != -ENOENT ||
+ !test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
+ goto out;
+
+ inode = afs_iget_autocell(dir, devname, strlen(devname), key);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ *fid = AFS_FS_I(inode)->fid;
+ _leave("= %p", inode);
+ return inode;
+
+out:
+ _leave("= %d", ret);
+ return ERR_PTR(ret);
+}
+
+/*
* look up an entry in a directory
*/
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
@@ -520,6 +554,13 @@
ret = afs_do_lookup(dir, dentry, &fid, key);
if (ret < 0) {
+ inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid);
+ if (!IS_ERR(inode)) {
+ key_put(key);
+ goto success;
+ }
+
+ ret = PTR_ERR(inode);
key_put(key);
if (ret == -ENOENT) {
d_add(dentry, NULL);
@@ -539,6 +580,7 @@
return ERR_CAST(inode);
}
+success:
dentry->d_op = &afs_fs_dentry_operations;
d_add(dentry, inode);
@@ -696,8 +738,9 @@
goto zap;
if (dentry->d_inode &&
- test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags))
- goto zap;
+ (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags) ||
+ test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(dentry->d_inode)->flags)))
+ goto zap;
_leave(" = 0 [keep]");
return 0;
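The dir.c hunks above give afs_lookup() a fallback: when a plain lookup fails with -ENOENT in a directory flagged AFS_VNODE_AUTOCELL, the name is treated as a cell and a pseudo directory inode is faked up to act as a mount point. A standalone model of just the eligibility check, with stand-in names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int do_lookup(const char *name)
{
	(void)name;
	return -ENOENT;			/* pretend the server has no such entry */
}

static int try_auto_mntpt(int ret, bool autocell)
{
	if (ret != -ENOENT || !autocell)
		return ret;		/* not eligible: keep the original error */
	return 0;			/* would fake up a pseudo dir inode here */
}

int main(void)
{
	int ret = do_lookup("grand.central.org");

	ret = try_auto_mntpt(ret, true);
	printf("lookup -> %d\n", ret);
	return 0;
}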
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 320ffef..0747339 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -19,6 +19,8 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
#include "internal.h"
struct afs_iget_data {
@@ -102,6 +104,16 @@
}
/*
+ * iget5() comparator for inode created by autocell operations
+ *
+ * These pseudo inodes don't match anything.
+ */
+static int afs_iget5_autocell_test(struct inode *inode, void *opaque)
+{
+ return 0;
+}
+
+/*
* iget5() inode initialiser
*/
static int afs_iget5_set(struct inode *inode, void *opaque)
@@ -118,6 +130,67 @@
}
/*
+ * inode retrieval for autocell
+ */
+struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+ int namesz, struct key *key)
+{
+ struct afs_iget_data data;
+ struct afs_super_info *as;
+ struct afs_vnode *vnode;
+ struct super_block *sb;
+ struct inode *inode;
+ static atomic_t afs_autocell_ino;
+
+ _enter("{%x:%u},%*.*s,",
+ AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
+ namesz, namesz, dev_name ?: "");
+
+ sb = dir->i_sb;
+ as = sb->s_fs_info;
+ data.volume = as->volume;
+ data.fid.vid = as->volume->vid;
+ data.fid.unique = 0;
+ data.fid.vnode = 0;
+
+ inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
+ afs_iget5_autocell_test, afs_iget5_set,
+ &data);
+ if (!inode) {
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ _debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }",
+ inode, inode->i_ino, data.fid.vid, data.fid.vnode,
+ data.fid.unique);
+
+ vnode = AFS_FS_I(inode);
+
+ /* there shouldn't be an existing inode */
+ BUG_ON(!(inode->i_state & I_NEW));
+
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ inode->i_op = &afs_autocell_inode_operations;
+ inode->i_nlink = 2;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_ctime.tv_sec = get_seconds();
+ inode->i_ctime.tv_nsec = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime;
+ inode->i_blocks = 0;
+ inode->i_version = 0;
+ inode->i_generation = 0;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+ inode->i_flags |= S_NOATIME;
+ unlock_new_inode(inode);
+ _leave(" = %p", inode);
+ return inode;
+}
+
+/*
* inode retrieval
*/
struct inode *afs_iget(struct super_block *sb, struct key *key,
@@ -314,6 +387,19 @@
}
/*
+ * discard an AFS inode
+ */
+int afs_drop_inode(struct inode *inode)
+{
+ _enter("");
+
+ if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags))
+ return generic_delete_inode(inode);
+ else
+ return generic_drop_inode(inode);
+}
+
+/*
* clear an AFS inode
*/
void afs_evict_inode(struct inode *inode)
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index c6c93f1..cca8eef 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -42,6 +42,7 @@
struct afs_mount_params {
bool rwpath; /* T if the parent should be considered R/W */
bool force; /* T to force cell type */
+ bool autocell; /* T if automount (autocell) operation requested */
afs_voltype_t type; /* type of volume requested */
int volnamesz; /* size of volume name */
const char *volname; /* name of volume to mount */
@@ -358,6 +359,8 @@
#define AFS_VNODE_READLOCKED 7 /* set if vnode is read-locked on the server */
#define AFS_VNODE_WRITELOCKED 8 /* set if vnode is write-locked on the server */
#define AFS_VNODE_UNLOCKING 9 /* set if vnode is being unlocked on the server */
+#define AFS_VNODE_AUTOCELL 10 /* set if Vnode is an auto mount point */
+#define AFS_VNODE_PSEUDODIR 11 /* set if Vnode is a pseudo directory */
long acl_order; /* ACL check count (callback break count) */
@@ -468,8 +471,8 @@
#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
extern int afs_cell_init(char *);
-extern struct afs_cell *afs_cell_create(const char *, char *);
-extern struct afs_cell *afs_cell_lookup(const char *, unsigned);
+extern struct afs_cell *afs_cell_create(const char *, unsigned, char *, bool);
+extern struct afs_cell *afs_cell_lookup(const char *, unsigned, bool);
extern struct afs_cell *afs_grab_cell(struct afs_cell *);
extern void afs_put_cell(struct afs_cell *);
extern void afs_cell_purge(void);
@@ -558,6 +561,8 @@
/*
* inode.c
*/
+extern struct inode *afs_iget_autocell(struct inode *, const char *, int,
+ struct key *);
extern struct inode *afs_iget(struct super_block *, struct key *,
struct afs_fid *, struct afs_file_status *,
struct afs_callback *);
@@ -566,6 +571,7 @@
extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int afs_setattr(struct dentry *, struct iattr *);
extern void afs_evict_inode(struct inode *);
+extern int afs_drop_inode(struct inode *);
/*
* main.c
@@ -581,6 +587,7 @@
* mntpt.c
*/
extern const struct inode_operations afs_mntpt_inode_operations;
+extern const struct inode_operations afs_autocell_inode_operations;
extern const struct file_operations afs_mntpt_file_operations;
extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index a9e2303..6d55268 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -38,6 +38,11 @@
.getattr = afs_getattr,
};
+const struct inode_operations afs_autocell_inode_operations = {
+ .follow_link = afs_mntpt_follow_link,
+ .getattr = afs_getattr,
+};
+
static LIST_HEAD(afs_vfsmounts);
static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
@@ -136,20 +141,16 @@
{
struct afs_super_info *super;
struct vfsmount *mnt;
+ struct afs_vnode *vnode;
struct page *page;
- size_t size;
- char *buf, *devname, *options;
+ char *devname, *options;
+ bool rwpath = false;
int ret;
_enter("{%s}", mntpt->d_name.name);
BUG_ON(!mntpt->d_inode);
- ret = -EINVAL;
- size = mntpt->d_inode->i_size;
- if (size > PAGE_SIZE - 1)
- goto error_no_devname;
-
ret = -ENOMEM;
devname = (char *) get_zeroed_page(GFP_KERNEL);
if (!devname)
@@ -159,28 +160,59 @@
if (!options)
goto error_no_options;
- /* read the contents of the AFS special symlink */
- page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto error_no_page;
+ vnode = AFS_FS_I(mntpt->d_inode);
+ if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
+ /* if the directory is a pseudo directory, use the d_name */
+ static const char afs_root_cell[] = ":root.cell.";
+ unsigned size = mntpt->d_name.len;
+
+ ret = -ENOENT;
+ if (size < 2 || size > AFS_MAXCELLNAME)
+ goto error_no_page;
+
+ if (mntpt->d_name.name[0] == '.') {
+ devname[0] = '#';
+ memcpy(devname + 1, mntpt->d_name.name + 1, size - 1);
+ memcpy(devname + size, afs_root_cell,
+ sizeof(afs_root_cell));
+ rwpath = true;
+ } else {
+ devname[0] = '%';
+ memcpy(devname + 1, mntpt->d_name.name, size);
+ memcpy(devname + size + 1, afs_root_cell,
+ sizeof(afs_root_cell));
+ }
+ } else {
+ /* read the contents of the AFS special symlink */
+ loff_t size = i_size_read(mntpt->d_inode);
+ char *buf;
+
+ ret = -EINVAL;
+ if (size > PAGE_SIZE - 1)
+ goto error_no_page;
+
+ page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto error_no_page;
+ }
+
+ ret = -EIO;
+ if (PageError(page))
+ goto error;
+
+ buf = kmap_atomic(page, KM_USER0);
+ memcpy(devname, buf, size);
+ kunmap_atomic(buf, KM_USER0);
+ page_cache_release(page);
+ page = NULL;
}
- ret = -EIO;
- if (PageError(page))
- goto error;
-
- buf = kmap_atomic(page, KM_USER0);
- memcpy(devname, buf, size);
- kunmap_atomic(buf, KM_USER0);
- page_cache_release(page);
- page = NULL;
-
/* work out what options we want */
super = AFS_FS_S(mntpt->d_sb);
memcpy(options, "cell=", 5);
strcpy(options + 5, super->volume->cell->name);
- if (super->volume->type == AFSVL_RWVOL)
+ if (super->volume->type == AFSVL_RWVOL || rwpath)
strcat(options, ",rwpath");
/* try and do the mount */
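
An illustration of the pseudo-directory name mapping constructed above (cell names are hypothetical): looking up "example.com" under an autocell mount yields the device name "%example.com:root.cell.", while looking up ".example.com" yields "#example.com:root.cell." and additionally turns on the rwpath mount option.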
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 852739d..096b23f 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -294,7 +294,7 @@
if (strcmp(kbuf, "add") == 0) {
struct afs_cell *cell;
- cell = afs_cell_create(name, args);
+ cell = afs_cell_create(name, strlen(name), args, false);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
goto done;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 9cf80f0..77e1e5a 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
@@ -48,6 +49,7 @@
static const struct super_operations afs_super_ops = {
.statfs = afs_statfs,
.alloc_inode = afs_alloc_inode,
+ .drop_inode = afs_drop_inode,
.destroy_inode = afs_destroy_inode,
.evict_inode = afs_evict_inode,
.put_super = afs_put_super,
@@ -62,12 +64,14 @@
afs_opt_cell,
afs_opt_rwpath,
afs_opt_vol,
+ afs_opt_autocell,
};
static const match_table_t afs_options_list = {
{ afs_opt_cell, "cell=%s" },
{ afs_opt_rwpath, "rwpath" },
{ afs_opt_vol, "vol=%s" },
+ { afs_opt_autocell, "autocell" },
{ afs_no_opt, NULL },
};
@@ -151,7 +155,8 @@
switch (token) {
case afs_opt_cell:
cell = afs_cell_lookup(args[0].from,
- args[0].to - args[0].from);
+ args[0].to - args[0].from,
+ false);
if (IS_ERR(cell))
return PTR_ERR(cell);
afs_put_cell(params->cell);
@@ -166,6 +171,10 @@
*devname = args[0].from;
break;
+ case afs_opt_autocell:
+ params->autocell = 1;
+ break;
+
default:
printk(KERN_ERR "kAFS:"
" Unknown or invalid mount option: '%s'\n", p);
@@ -252,10 +261,10 @@
/* lookup the cell record */
if (cellname || !params->cell) {
- cell = afs_cell_lookup(cellname, cellnamesz);
+ cell = afs_cell_lookup(cellname, cellnamesz, true);
if (IS_ERR(cell)) {
- printk(KERN_ERR "kAFS: unable to lookup cell '%s'\n",
- cellname ?: "");
+ printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
+ cellnamesz, cellnamesz, cellname ?: "");
return PTR_ERR(cell);
}
afs_put_cell(params->cell);
@@ -321,6 +330,9 @@
if (IS_ERR(inode))
goto error_inode;
+ if (params->autocell)
+ set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
+
ret = -ENOMEM;
root = d_alloc_root(inode);
if (!root)
diff --git a/fs/cifs/README b/fs/cifs/README
index a7081ee..7099a52 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -301,6 +301,16 @@
gid Set the default gid for inodes (similar to above).
file_mode If CIFS Unix extensions are not supported by the server
this overrides the default mode for file inodes.
+ fsc Enable local disk caching using FS-Cache (off by default). This
+ option could be useful to improve performance on a slow link,
+ a heavily loaded server and/or a congested network, where
+ reading from the local disk is faster than reading from the
+ server (over the network). This could also improve scalability,
+ as the number of calls to the server is reduced. However, local
+ caching is not suitable for all workloads, e.g. read-once
+ workloads, so consider your workload/scenario carefully
+ before using this option. Currently, local
+ disk caching is functional for CIFS files opened as read-only.
dir_mode If CIFS Unix extensions are not supported by the server
this overrides the default mode for directory inodes.
port attempt to contact the server on this tcp port, before
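
As a usage sketch (server, share, mount point and user names are hypothetical), the new option is passed like any other CIFS mount option:

	mount -t cifs //server/share /mnt/cifs -o user=alice,fsc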
diff --git a/fs/file_table.c b/fs/file_table.c
index 2fc3b3c..edecd36 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -230,15 +230,6 @@
might_sleep();
fsnotify_close(file);
-
- /*
- * fsnotify_create_event may have taken one or more references on this
- * file. If it did so it left one reference for us to drop to make sure
- * its calls to fput could not prematurely destroy the file.
- */
- if (atomic_long_read(&file->f_count))
- return fput(file);
-
/*
* The function eventpoll_release() should be the first called
* in the file cleanup chain.
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index cc1bb33..26a510a 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -100,3 +100,20 @@
help
Say Y here if you want NFS data to be cached locally on disc through
the general filesystem cache manager
+
+config NFS_USE_LEGACY_DNS
+ bool "Use the legacy NFS DNS resolver"
+ depends on NFS_V4
+ help
+ The kernel now provides a method for translating a host name into an
+ IP address. Select Y here if you would rather use your own DNS
+ resolver script.
+
+ If unsure, say N
+
+config NFS_USE_KERNEL_DNS
+ bool
+ depends on NFS_V4 && !NFS_USE_LEGACY_DNS
+ select DNS_RESOLVER
+ select KEYS
+ default y
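
With the kernel resolver selected, hostname lookups are upcalled through the keyrings request-key mechanism instead of a private script, so /etc/request-key.conf needs an entry along these lines (the helper path varies by distribution and is shown here as an assumption):

	create	dns_resolver	*	*	/sbin/key.dns_resolver %k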
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 76fd235..dba50a5 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -6,6 +6,29 @@
* Resolves DNS hostnames into valid ip addresses
*/
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/dns_resolver.h>
+
+ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
+ struct sockaddr *sa, size_t salen)
+{
+ ssize_t ret;
+ char *ip_addr = NULL;
+ int ip_len;
+
+ ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL);
+ if (ip_len > 0)
+ ret = rpc_pton(ip_addr, ip_len, sa, salen);
+ else
+ ret = -ESRCH;
+ kfree(ip_addr);
+ return ret;
+}
+
+#else
+
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/kmod.h>
@@ -346,3 +369,4 @@
nfs_cache_unregister(&nfs_dns_resolve);
}
+#endif
diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h
index a3f0938..199bb55 100644
--- a/fs/nfs/dns_resolve.h
+++ b/fs/nfs/dns_resolve.h
@@ -6,8 +6,20 @@
#define NFS_DNS_HOSTNAME_MAXLEN (128)
+
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+static inline int nfs_dns_resolver_init(void)
+{
+ return 0;
+}
+
+static inline void nfs_dns_resolver_destroy(void)
+{}
+#else
extern int nfs_dns_resolver_init(void);
extern void nfs_dns_resolver_destroy(void);
+#endif
+
extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
struct sockaddr *sa, size_t salen);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index eb8f73c..756566f 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -17,9 +17,9 @@
old->data_type == new->data_type &&
old->tgid == new->tgid) {
switch (old->data_type) {
- case (FSNOTIFY_EVENT_FILE):
- if ((old->file->f_path.mnt == new->file->f_path.mnt) &&
- (old->file->f_path.dentry == new->file->f_path.dentry))
+ case (FSNOTIFY_EVENT_PATH):
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
return true;
case (FSNOTIFY_EVENT_NONE):
return true;
@@ -174,7 +174,7 @@
return false;
/* if we don't have enough info to send an event to userspace say no */
- if (data_type != FSNOTIFY_EVENT_FILE)
+ if (data_type != FSNOTIFY_EVENT_PATH)
return false;
if (inode_mark && vfsmnt_mark) {
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 25a3b4d..032b837 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -65,7 +65,7 @@
if (client_fd < 0)
return client_fd;
- if (event->data_type != FSNOTIFY_EVENT_FILE) {
+ if (event->data_type != FSNOTIFY_EVENT_PATH) {
WARN_ON(1);
put_unused_fd(client_fd);
return -EINVAL;
@@ -75,8 +75,8 @@
* we need a new file handle for the userspace program so it can read even if it was
* originally opened O_WRONLY.
*/
- dentry = dget(event->file->f_path.dentry);
- mnt = mntget(event->file->f_path.mnt);
+ dentry = dget(event->path.dentry);
+ mnt = mntget(event->path.mnt);
/* it's possible this event was an overflow event. in that case dentry and mnt
* are NULL; That's fine, just don't call dentry open */
if (dentry && mnt)
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 4d2a82c..3970392 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -84,7 +84,7 @@
}
/* Notify this dentry's parent about a child's events. */
-void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
+void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
struct dentry *parent;
struct inode *p_inode;
@@ -92,7 +92,7 @@
bool should_update_children = false;
if (!dentry)
- dentry = file->f_path.dentry;
+ dentry = path->dentry;
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return;
@@ -124,8 +124,8 @@
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
- if (file)
- fsnotify(p_inode, mask, file, FSNOTIFY_EVENT_FILE,
+ if (path)
+ fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
dentry->d_name.name, 0);
else
fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
@@ -217,8 +217,8 @@
/* global tests shouldn't care about events on child only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
- if (data_is == FSNOTIFY_EVENT_FILE)
- mnt = ((struct file *)data)->f_path.mnt;
+ if (data_is == FSNOTIFY_EVENT_PATH)
+ mnt = ((struct path *)data)->mnt;
else
mnt = NULL;
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 5e73eeb..a91b69a 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -52,9 +52,9 @@
!strcmp(old->file_name, new->file_name))
return true;
break;
- case (FSNOTIFY_EVENT_FILE):
- if ((old->file->f_path.mnt == new->file->f_path.mnt) &&
- (old->file->f_path.dentry == new->file->f_path.dentry))
+ case (FSNOTIFY_EVENT_PATH):
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
return true;
break;
case (FSNOTIFY_EVENT_NONE):
@@ -147,10 +147,10 @@
__u32 mask, void *data, int data_type)
{
if ((inode_mark->mask & FS_EXCL_UNLINK) &&
- (data_type == FSNOTIFY_EVENT_FILE)) {
- struct file *file = data;
+ (data_type == FSNOTIFY_EVENT_PATH)) {
+ struct path *path = data;
- if (d_unlinked(file->f_path.dentry))
+ if (d_unlinked(path->dentry))
return false;
}
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index d6c435a..f39260f 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -31,7 +31,6 @@
* allocated and used.
*/
-#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -90,8 +89,8 @@
if (atomic_dec_and_test(&event->refcnt)) {
pr_debug("%s: event=%p\n", __func__, event);
- if (event->data_type == FSNOTIFY_EVENT_FILE)
- fput(event->file);
+ if (event->data_type == FSNOTIFY_EVENT_PATH)
+ path_put(&event->path);
BUG_ON(!list_empty(&event->private_data_list));
@@ -376,8 +375,8 @@
}
}
event->tgid = get_pid(old_event->tgid);
- if (event->data_type == FSNOTIFY_EVENT_FILE)
- get_file(event->file);
+ if (event->data_type == FSNOTIFY_EVENT_PATH)
+ path_get(&event->path);
return event;
}
@@ -424,22 +423,11 @@
event->data_type = data_type;
switch (data_type) {
- case FSNOTIFY_EVENT_FILE: {
- event->file = data;
- /*
- * if this file is about to disappear hold an extra reference
- * until we return to __fput so we don't have to worry about
- * future get/put destroying the file under us or generating
- * additional events. Notice that we change f_mode without
- * holding f_lock. This is safe since this is the only possible
- * reference to this object in the kernel (it was about to be
- * freed, remember?)
- */
- if (!atomic_long_read(&event->file->f_count)) {
- event->file->f_mode |= FMODE_NONOTIFY;
- get_file(event->file);
- }
- get_file(event->file);
+ case FSNOTIFY_EVENT_PATH: {
+ struct path *path = data;
+ event->path.dentry = path->dentry;
+ event->path.mnt = path->mnt;
+ path_get(&event->path);
break;
}
case FSNOTIFY_EVENT_INODE:
@@ -447,7 +435,8 @@
break;
case FSNOTIFY_EVENT_NONE:
event->inode = NULL;
- event->file = NULL;
+ event->path.dentry = NULL;
+ event->path.mnt = NULL;
break;
default:
BUG();
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 848480b..2308fbb 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -129,7 +129,7 @@
/**
* dev_hw_addr_random - Create random MAC and set device flag
* @dev: pointer to net_device structure
- * @addr: Pointer to a six-byte array containing the Ethernet address
+ * @hwaddr: Pointer to a six-byte array containing the Ethernet address
*
* Generate random MAC to be used by a device and set addr_assign_type
* so the state can be read by sysfs and be used by udev.
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index e4e2204..59d0df4 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -26,18 +26,19 @@
}
/* Notify this dentry's parent about a child's events. */
-static inline void fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
+static inline void fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
if (!dentry)
- dentry = file->f_path.dentry;
+ dentry = path->dentry;
- __fsnotify_parent(file, dentry, mask);
+ __fsnotify_parent(path, dentry, mask);
}
/* simple call site for access decisions */
static inline int fsnotify_perm(struct file *file, int mask)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 fsnotify_mask = 0;
if (file->f_mode & FMODE_NONOTIFY)
@@ -51,7 +52,7 @@
else
BUG();
- return fsnotify(inode, fsnotify_mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+ return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
/*
@@ -186,15 +187,16 @@
*/
static inline void fsnotify_access(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(file, NULL, mask);
- fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
}
@@ -203,15 +205,16 @@
*/
static inline void fsnotify_modify(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(file, NULL, mask);
- fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
}
@@ -220,15 +223,16 @@
*/
static inline void fsnotify_open(struct file *file)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(file, NULL, mask);
- fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
}
@@ -237,6 +241,7 @@
*/
static inline void fsnotify_close(struct file *file)
{
+ struct path *path = &file->f_path;
struct inode *inode = file->f_path.dentry->d_inode;
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
@@ -245,8 +250,8 @@
mask |= FS_IN_ISDIR;
if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(file, NULL, mask);
- fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
}
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 9bbfd72..ed36fb5 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -203,20 +203,20 @@
/* to_tell may ONLY be dereferenced during handle_event(). */
struct inode *to_tell; /* either the inode the event happened to or its parent */
/*
- * depending on the event type we should have either a file or inode
- * We hold a reference on file, but NOT on inode. Since we have the ref on
- * the file, it may be dereferenced at any point during this object's
+ * Depending on the event type we should have either a path or an inode.
+ * We hold a reference on path, but NOT on inode. Since we have the ref on
+ * the path, it may be dereferenced at any point during this object's
* lifetime. That reference is dropped when this object's refcnt hits
- * 0. If this event contains an inode instead of a file, the inode may
+ * 0. If this event contains an inode instead of a path, the inode may
* ONLY be used during handle_event().
*/
union {
- struct file *file;
+ struct path path;
struct inode *inode;
};
/* when calling fsnotify tell it if the data is a path or inode */
#define FSNOTIFY_EVENT_NONE 0
-#define FSNOTIFY_EVENT_FILE 1
+#define FSNOTIFY_EVENT_PATH 1
#define FSNOTIFY_EVENT_INODE 2
int data_type; /* which of the above union we have */
atomic_t refcnt; /* how many groups still are using/need to send this event */
@@ -293,7 +293,7 @@
/* main fsnotify call to send events */
extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const unsigned char *name, u32 cookie);
-extern void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask);
+extern void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern u32 fsnotify_get_cookie(void);
@@ -422,7 +422,7 @@
return 0;
}
-static inline void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
+static inline void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{}
static inline void __fsnotify_inode_delete(struct inode *inode)
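
Given the new union above, a backend's event consumer switches on data_type and dereferences a struct path rather than a struct file. A minimal, hypothetical fragment (example_handle_event is not part of the patch):

	static void example_handle_event(struct fsnotify_event *event)
	{
		switch (event->data_type) {
		case FSNOTIFY_EVENT_PATH:
			/* the event holds a path reference, so the path may
			 * be dereferenced for the event's whole lifetime */
			pr_info("event 0x%x on %s\n", event->mask,
				event->path.dentry->d_name.name);
			break;
		case FSNOTIFY_EVENT_INODE:
			/* no reference held: only valid in handle_event() */
			pr_info("event 0x%x on inode %lu\n", event->mask,
				event->inode->i_ino);
			break;
		}
	}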
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 413742c..791d510 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -122,7 +122,7 @@
}
#else
-static inline int netpoll_rx(struct sk_buff *skb)
+static inline bool netpoll_rx(struct sk_buff *skb)
{
return 0;
}
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 636724b..6c24144 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -33,9 +33,9 @@
#define L2CAP_DEFAULT_FLUSH_TO 0xffff
#define L2CAP_DEFAULT_TX_WINDOW 63
#define L2CAP_DEFAULT_MAX_TX 3
-#define L2CAP_DEFAULT_RETRANS_TO 1000 /* 1 second */
+#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
-#define L2CAP_DEFAULT_MAX_PDU_SIZE 672
+#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
#define L2CAP_DEFAULT_ACK_TO 200
#define L2CAP_LOCAL_BUSY_TRIES 12
diff --git a/include/net/sock.h b/include/net/sock.h
index a441c9c..ac53bfb 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -195,7 +195,8 @@
* @sk_priority: %SO_PRIORITY setting
* @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family
- * @sk_peercred: %SO_PEERCRED setting
+ * @sk_peer_pid: &struct pid for this socket's peer
+ * @sk_peer_cred: %SO_PEERCRED setting
* @sk_rcvlowat: %SO_RCVLOWAT setting
* @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting
@@ -211,6 +212,7 @@
* @sk_send_head: front of stuff to transmit
* @sk_security: used by security modules
* @sk_mark: generic packet mark
+ * @sk_classid: this socket's cgroup classid
* @sk_write_pending: a write to stream socket waits to start
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 6bf2306..f0c9b2e 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -526,8 +526,8 @@
BUG_ON(group != audit_watch_group);
switch (event->data_type) {
- case (FSNOTIFY_EVENT_FILE):
- inode = event->file->f_path.dentry->d_inode;
+ case (FSNOTIFY_EVENT_PATH):
+ inode = event->path.dentry->d_inode;
break;
case (FSNOTIFY_EVENT_INODE):
inode = event->inode;
diff --git a/mm/memory.c b/mm/memory.c
index 858829d..9b3b73f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2760,6 +2760,26 @@
}
/*
+ * This is like a special single-page "expand_downwards()",
+ * except we must first make sure that 'address-PAGE_SIZE'
+ * doesn't hit another vma.
+ *
+ * The "find_vma()" will do the right thing even if we wrap
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+ address &= PAGE_MASK;
+ if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+ address -= PAGE_SIZE;
+ if (find_vma(vma->vm_mm, address) != vma)
+ return -ENOMEM;
+
+ expand_stack(vma, address);
+ }
+ return 0;
+}
+
+/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2772,6 +2792,11 @@
spinlock_t *ptl;
pte_t entry;
+ if (check_stack_guard_page(vma, address) < 0) {
+ pte_unmap(page_table);
+ return VM_FAULT_SIGBUS;
+ }
+
if (!(flags & FAULT_FLAG_WRITE)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
vma->vm_page_prot));
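
The effect of check_stack_guard_page() is visible from userspace: once a mapping sits directly below the stack VMA, an access that would have to grow the stack into it now gets SIGBUS instead of quietly abutting the mapping. A rough, hypothetical demonstration (error handling elided; the program is expected to die with SIGBUS):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* find the bottom of the [stack] VMA in /proc/self/maps */
	static unsigned long stack_bottom(void)
	{
		FILE *f = fopen("/proc/self/maps", "r");
		char line[256];
		unsigned long lo = 0, hi = 0;

		while (f && fgets(line, sizeof(line), f)) {
			if (strstr(line, "[stack]")) {
				sscanf(line, "%lx-%lx", &lo, &hi);
				break;
			}
		}
		if (f)
			fclose(f);
		return lo;
	}

	/* touch a page of stack per call so the VMA must really grow */
	static void recurse(int depth)
	{
		volatile char pad[4096];

		pad[0] = (char)depth;
		recurse(depth + 1);
	}

	int main(void)
	{
		unsigned long bottom = stack_bottom();
		long page = sysconf(_SC_PAGESIZE);

		if (!bottom)
			return 1;

		/* drop an anonymous mapping immediately below the stack */
		mmap((void *)(bottom - page), page, PROT_READ,
		     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

		/* growth into the mapping is now refused: SIGBUS here */
		recurse(0);
		return 0;
	}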
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 3e3cd9d..fadf26b 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2705,8 +2705,9 @@
case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
pi->remote_max_tx = rfc.max_transmit;
- if (rfc.max_pdu_size > pi->conn->mtu - 10)
- rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
+
+ if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
+ rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
@@ -2723,8 +2724,8 @@
break;
case L2CAP_MODE_STREAMING:
- if (rfc.max_pdu_size > pi->conn->mtu - 10)
- rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
+ if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
+ rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
@@ -2806,7 +2807,6 @@
if (*result == L2CAP_CONF_SUCCESS) {
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
- pi->remote_tx_win = rfc.txwin_size;
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size);
@@ -2862,7 +2862,6 @@
done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
- pi->remote_tx_win = rfc.txwin_size;
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size);
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 01f238f..c49a669 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,7 +9,7 @@
#include <linux/hardirq.h>
#include <net/caif/cfpkt.h>
-#define PKT_PREFIX 16
+#define PKT_PREFIX 48
#define PKT_POSTFIX 2
#define PKT_LEN_WHEN_EXTENDING 128
#define PKT_ERROR(pkt, errmsg) do { \
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9c65e9d..08ffe9e 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -60,6 +60,13 @@
#include <net/sock.h>
#include <net/net_namespace.h>
+/*
+ * To send multiple CAN frames within TX_SETUP, or to filter
+ * CAN messages with a multiplex index within RX_SETUP, the number of
+ * different filters is limited to 256 due to the one-byte index value.
+ */
+#define MAX_NFRAMES 256
+
/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -89,16 +96,16 @@
struct list_head list;
int ifindex;
canid_t can_id;
- int flags;
+ u32 flags;
unsigned long frames_abs, frames_filtered;
struct timeval ival1, ival2;
struct hrtimer timer, thrtimer;
struct tasklet_struct tsklet, thrtsklet;
ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
int rx_ifindex;
- int count;
- int nframes;
- int currframe;
+ u32 count;
+ u32 nframes;
+ u32 currframe;
struct can_frame *frames;
struct can_frame *last_frames;
struct can_frame sframe;
@@ -175,7 +182,7 @@
seq_printf(m, "rx_op: %03X %-5s ",
op->can_id, bcm_proc_getifname(ifname, op->ifindex));
- seq_printf(m, "[%d]%c ", op->nframes,
+ seq_printf(m, "[%u]%c ", op->nframes,
(op->flags & RX_CHECK_DLC)?'d':' ');
if (op->kt_ival1.tv64)
seq_printf(m, "timeo=%lld ",
@@ -198,7 +205,7 @@
list_for_each_entry(op, &bo->tx_ops, list) {
- seq_printf(m, "tx_op: %03X %s [%d] ",
+ seq_printf(m, "tx_op: %03X %s [%u] ",
op->can_id,
bcm_proc_getifname(ifname, op->ifindex),
op->nframes);
@@ -283,7 +290,7 @@
struct can_frame *firstframe;
struct sockaddr_can *addr;
struct sock *sk = op->sk;
- int datalen = head->nframes * CFSIZ;
+ unsigned int datalen = head->nframes * CFSIZ;
int err;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -468,7 +475,7 @@
* bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
* received data stored in op->last_frames[]
*/
-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
+static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
const struct can_frame *rxdata)
{
/*
@@ -554,7 +561,8 @@
/*
* bcm_rx_do_flush - helper for bcm_rx_thr_flush
*/
-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
+ unsigned int index)
{
if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
if (update)
@@ -575,7 +583,7 @@
int updated = 0;
if (op->nframes > 1) {
- int i;
+ unsigned int i;
/* for MUX filter we start at index 1 */
for (i = 1; i < op->nframes; i++)
@@ -624,7 +632,7 @@
{
struct bcm_op *op = (struct bcm_op *)data;
const struct can_frame *rxframe = (struct can_frame *)skb->data;
- int i;
+ unsigned int i;
/* disable timeout */
hrtimer_cancel(&op->timer);
@@ -822,14 +830,15 @@
{
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
- int i, err;
+ unsigned int i;
+ int err;
/* we need a real device to send frames */
if (!ifindex)
return -ENODEV;
- /* we need at least one can_frame */
- if (msg_head->nframes < 1)
+ /* check nframes boundaries - we need at least one can_frame */
+ if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
return -EINVAL;
/* check the given can_id */
@@ -993,6 +1002,10 @@
msg_head->nframes = 0;
}
+ /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
+ if (msg_head->nframes > MAX_NFRAMES + 1)
+ return -EINVAL;
+
if ((msg_head->flags & RX_RTR_FRAME) &&
((msg_head->nframes != 1) ||
(!(msg_head->can_id & CAN_RTR_FLAG))))
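
For context, TX_SETUP arrives through a write() to a BCM socket whose payload is a bcm_msg_head followed by nframes can_frames; the new check simply rejects heads claiming more than MAX_NFRAMES entries. A minimal, hypothetical userspace sender (interface name and CAN ID are made up; error handling elided):

	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/socket.h>
	#include <linux/can.h>
	#include <linux/can/bcm.h>

	int main(void)
	{
		int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
		struct sockaddr_can addr = {
			.can_family  = AF_CAN,
			.can_ifindex = if_nametoindex("can0"),
		};
		struct {
			struct bcm_msg_head head;
			struct can_frame frame;	/* nframes == 1 */
		} msg;

		connect(s, (struct sockaddr *)&addr, sizeof(addr));

		memset(&msg, 0, sizeof(msg));
		msg.head.opcode        = TX_SETUP;
		msg.head.flags         = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
		msg.head.can_id        = 0x123;
		msg.head.ival2.tv_usec = 100000;  /* cyclic every 100 ms */
		msg.head.nframes       = 1;       /* must be 1..MAX_NFRAMES */
		msg.frame.can_dlc      = 2;
		msg.frame.data[0]      = 0xde;
		msg.frame.data[1]      = 0xad;
		write(s, &msg, sizeof(msg));

		pause();	/* let the kernel cycle the frame */
		return 0;
	}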
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 400a04d..739435a 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -29,6 +29,7 @@
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
+#include <linux/seq_file.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"
@@ -43,6 +44,8 @@
const struct cred *dns_resolver_cache;
+#define DNS_ERRORNO_OPTION "dnserror"
+
/*
* Instantiate a user defined key for dns_resolver.
*
@@ -59,9 +62,10 @@
dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
{
struct user_key_payload *upayload;
+ unsigned long derrno;
int ret;
size_t result_len = 0;
- const char *data = _data, *opt;
+ const char *data = _data, *end, *opt;
kenter("%%%d,%s,'%s',%zu",
key->serial, key->description, data, datalen);
@@ -71,13 +75,77 @@
datalen--;
/* deal with any options embedded in the data */
+ end = data + datalen;
opt = memchr(data, '#', datalen);
if (!opt) {
- kdebug("no options currently supported");
- return -EINVAL;
+ /* no options: the entire data is the result */
+ kdebug("no options");
+ result_len = datalen;
+ } else {
+ const char *next_opt;
+
+ result_len = opt - data;
+ opt++;
+ kdebug("options: '%s'", opt);
+ do {
+ const char *eq;
+ int opt_len, opt_nlen, opt_vlen, tmp;
+
+ next_opt = memchr(opt, '#', end - opt) ?: end;
+ opt_len = next_opt - opt;
+ if (!opt_len) {
+ printk(KERN_WARNING
+ "Empty option to dns_resolver key %d\n",
+ key->serial);
+ return -EINVAL;
+ }
+
+ eq = memchr(opt, '=', opt_len) ?: end;
+ opt_nlen = eq - opt;
+ eq++;
+ opt_vlen = next_opt - eq; /* will be -1 if no value */
+
+ tmp = opt_vlen >= 0 ? opt_vlen : 0;
+ kdebug("option '%*.*s' val '%*.*s'",
+ opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+
+ /* see if it's an error number representing a DNS error
+ * that's to be recorded as the result in this key */
+ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
+ memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
+ kdebug("dns error number option");
+ if (opt_vlen <= 0)
+ goto bad_option_value;
+
+ ret = strict_strtoul(eq, 10, &derrno);
+ if (ret < 0)
+ goto bad_option_value;
+
+ if (derrno < 1 || derrno > 511)
+ goto bad_option_value;
+
+ kdebug("dns error no. = %lu", derrno);
+ key->type_data.x[0] = -derrno;
+ continue;
+ }
+
+ bad_option_value:
+ printk(KERN_WARNING
+ "Option '%*.*s' to dns_resolver key %d:"
+ " bad/missing value\n",
+ opt_nlen, opt_nlen, opt, key->serial);
+ return -EINVAL;
+ } while (opt = next_opt + 1, opt < end);
}
- result_len = datalen;
+ /* don't cache the result if we're caching an error saying there's no
+ * result */
+ if (key->type_data.x[0]) {
+ kleave(" = 0 [h_error %ld]", key->type_data.x[0]);
+ return 0;
+ }
+
+ kdebug("store result");
ret = key_payload_reserve(key, result_len);
if (ret < 0)
return -EINVAL;
@@ -135,13 +203,27 @@
return ret;
}
+/*
+ * Describe a DNS key
+ */
+static void dns_resolver_describe(const struct key *key, struct seq_file *m)
+{
+ int err = key->type_data.x[0];
+
+ seq_puts(m, key->description);
+ if (err)
+ seq_printf(m, ": %d", err);
+ else
+ seq_printf(m, ": %u", key->datalen);
+}
+
struct key_type key_type_dns_resolver = {
.name = "dns_resolver",
.instantiate = dns_resolver_instantiate,
.match = dns_resolver_match,
.revoke = user_revoke,
.destroy = user_destroy,
- .describe = user_describe,
+ .describe = dns_resolver_describe,
.read = user_read,
};
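
Illustratively (all values hypothetical), the upcall helper can now instantiate a dns_resolver key either with a bare result payload such as "192.0.2.1", optionally followed by "#"-separated options, or with an options-only payload such as "#dnserror=6" to record a lookup failure: the error number must lie in 1..511, no result payload is cached, and the negative error is handed straight back to callers of dns_query().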
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 03d5255..c32be29 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -136,6 +136,11 @@
if (ret < 0)
goto put;
+ /* If the DNS server gave an error, return that to the caller */
+ ret = rkey->type_data.x[0];
+ if (ret)
+ goto put;
+
upayload = rcu_dereference_protected(rkey->payload.data,
lockdep_is_held(&rkey->sem));
len = upayload->datalen;
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 1120178..87bb5f4 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
menuconfig NET_DSA
bool "Distributed Switch Architecture support"
default n
- depends on EXPERIMENTAL && NET_ETHERNET && !S390
+ depends on EXPERIMENTAL && NETDEVICES && !S390
select PHYLIB
---help---
This allows you to use hardware switch chips that use
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b9e8c3b..408eea7 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -150,22 +150,34 @@
if (qops->enqueue == NULL)
qops->enqueue = noop_qdisc_ops.enqueue;
if (qops->peek == NULL) {
- if (qops->dequeue == NULL) {
+ if (qops->dequeue == NULL)
qops->peek = noop_qdisc_ops.peek;
- } else {
- rc = -EINVAL;
- goto out;
- }
+ else
+ goto out_einval;
}
if (qops->dequeue == NULL)
qops->dequeue = noop_qdisc_ops.dequeue;
+ if (qops->cl_ops) {
+ const struct Qdisc_class_ops *cops = qops->cl_ops;
+
+ if (!(cops->get && cops->put && cops->walk && cops->leaf))
+ goto out_einval;
+
+ if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
+ goto out_einval;
+ }
+
qops->next = NULL;
*qp = qops;
rc = 0;
out:
write_unlock(&qdisc_mod_lock);
return rc;
+
+out_einval:
+ rc = -EINVAL;
+ goto out;
}
EXPORT_SYMBOL(register_qdisc);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e114f23..34066278 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -418,7 +418,7 @@
}
ret = qdisc_enqueue(skb, flow->q);
- if (ret != 0) {
+ if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
@@ -442,7 +442,7 @@
*/
if (flow == &p->link) {
sch->q.qlen++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
tasklet_schedule(&p->task);
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 534f332..201cbac 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -334,7 +334,7 @@
if (++sch->q.qlen <= q->limit) {
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
sfq_drop(sch);
@@ -508,6 +508,11 @@
return -1;
}
+static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
return 0;
@@ -519,6 +524,10 @@
return 0;
}
+static void sfq_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct sfq_sched_data *q = qdisc_priv(sch);
@@ -571,9 +580,12 @@
}
static const struct Qdisc_class_ops sfq_class_ops = {
+ .leaf = sfq_leaf,
.get = sfq_get,
+ .put = sfq_put,
.tcf_chain = sfq_find_tcf,
.bind_tcf = sfq_bind,
+ .unbind_tcf = sfq_put,
.dump = sfq_dump_class,
.dump_stats = sfq_dump_class_stats,
.walk = sfq_walk,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 0991c64..641a30d 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -127,7 +127,7 @@
return qdisc_reshape_fail(skb, sch);
ret = qdisc_enqueue(skb, q->qdisc);
- if (ret != 0) {
+ if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
sch->qstats.drops++;
return ret;
@@ -136,7 +136,7 @@
sch->q.qlen++;
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
static unsigned int tbf_drop(struct Qdisc* sch)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 807643b..feaabc1 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -85,7 +85,7 @@
__skb_queue_tail(&q->q, skb);
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
kfree_skb(skb);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index e74a1a2..d1a3fb9 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -843,13 +843,19 @@
return -EINVAL;
if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
/* Verify that we are associated with the destination AP */
+ wdev_lock(wdev);
+
if (!wdev->current_bss ||
memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
ETH_ALEN) != 0 ||
(wdev->iftype == NL80211_IFTYPE_STATION &&
memcmp(wdev->current_bss->pub.bssid, mgmt->da,
- ETH_ALEN) != 0))
+ ETH_ALEN) != 0)) {
+ wdev_unlock(wdev);
return -ENOTCONN;
+ }
+
+ wdev_unlock(wdev);
}
if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)