blob: 5d8c095ec5ecaeb053169bfdca520d17530d4a39 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* net/sched/sch_teql.c "True" (or "trivial") link equalizer.
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License
5 * as published by the Free Software Foundation; either version
6 * 2 of the License, or (at your option) any later version.
7 *
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9 */
10
11#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <linux/types.h>
13#include <linux/kernel.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090014#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/errno.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020017#include <linux/if_arp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/netdevice.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/init.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <linux/skbuff.h>
21#include <linux/moduleparam.h>
Patrick McHardy0ba48052007-07-02 22:49:07 -070022#include <net/dst.h>
23#include <net/neighbour.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <net/pkt_sched.h>
25
26/*
27 How to setup it.
28 ----------------
29
30 After loading this module you will find a new device teqlN
31 and new qdisc with the same name. To join a slave to the equalizer
32 you should just set this qdisc on a device f.e.
33
34 # tc qdisc add dev eth0 root teql0
35 # tc qdisc add dev eth1 root teql0
36
37 That's all. Full PnP 8)
38
39 Applicability.
40 --------------
41
42 1. Slave devices MUST be active devices, i.e., they must raise the tbusy
43 signal and generate EOI events. If you want to equalize virtual devices
44 like tunnels, use a normal eql device.
45 2. This device puts no limitations on physical slave characteristics
46 f.e. it will equalize 9600baud line and 100Mb ethernet perfectly :-)
47 Certainly, large difference in link speeds will make the resulting
48 eqalized link unusable, because of huge packet reordering.
49 I estimate an upper useful difference as ~10 times.
50 3. If the slave requires address resolution, only protocols using
51 neighbour cache (IPv4/IPv6) will work over the equalized link.
52 Other protocols are still allowed to use the slave device directly,
53 which will not break load balancing, though native slave
54 traffic will have the highest priority. */
55
Eric Dumazetcc7ec452011-01-19 19:26:56 +000056struct teql_master {
Linus Torvalds1da177e2005-04-16 15:20:36 -070057 struct Qdisc_ops qops;
58 struct net_device *dev;
59 struct Qdisc *slaves;
60 struct list_head master_list;
Eric Dumazet1ac9ad12011-01-12 12:13:14 +000061 unsigned long tx_bytes;
62 unsigned long tx_packets;
63 unsigned long tx_errors;
64 unsigned long tx_dropped;
Linus Torvalds1da177e2005-04-16 15:20:36 -070065};
66
Eric Dumazetcc7ec452011-01-19 19:26:56 +000067struct teql_sched_data {
Linus Torvalds1da177e2005-04-16 15:20:36 -070068 struct Qdisc *next;
69 struct teql_master *m;
Linus Torvalds1da177e2005-04-16 15:20:36 -070070 struct sk_buff_head q;
71};
72
Eric Dumazetcc7ec452011-01-19 19:26:56 +000073#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
Eric Dumazetcc7ec452011-01-19 19:26:56 +000075#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
77/* "teql*" qdisc routines */
78
79static int
Eric Dumazet520ac302016-06-21 23:16:49 -070080teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
Linus Torvalds1da177e2005-04-16 15:20:36 -070081{
David S. Miller5ce2d482008-07-08 17:06:30 -070082 struct net_device *dev = qdisc_dev(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083 struct teql_sched_data *q = qdisc_priv(sch);
84
Krishna Kumar4cd8c9e2007-05-08 18:57:50 -070085 if (q->q.qlen < dev->tx_queue_len) {
86 __skb_queue_tail(&q->q, skb);
Ben Greear9871e502010-08-10 01:45:40 -070087 return NET_XMIT_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -070088 }
89
Eric Dumazet520ac302016-06-21 23:16:49 -070090 return qdisc_drop(skb, sch, to_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091}
92
Linus Torvalds1da177e2005-04-16 15:20:36 -070093static struct sk_buff *
Eric Dumazetcc7ec452011-01-19 19:26:56 +000094teql_dequeue(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
96 struct teql_sched_data *dat = qdisc_priv(sch);
David S. Millerb0e1e642008-07-08 17:42:10 -070097 struct netdev_queue *dat_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -070098 struct sk_buff *skb;
John Fastabend46e5da42014-09-12 20:04:52 -070099 struct Qdisc *q;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100
101 skb = __skb_dequeue(&dat->q);
David S. Millere8a04642008-07-17 00:34:19 -0700102 dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
John Fastabend46e5da42014-09-12 20:04:52 -0700103 q = rcu_dereference_bh(dat_queue->qdisc);
104
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105 if (skb == NULL) {
John Fastabend46e5da42014-09-12 20:04:52 -0700106 struct net_device *m = qdisc_dev(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 if (m) {
108 dat->m->slaves = sch;
109 netif_wake_queue(m);
110 }
Eric Dumazet9190b3b2011-01-20 23:31:33 -0800111 } else {
112 qdisc_bstats_update(sch, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113 }
John Fastabend46e5da42014-09-12 20:04:52 -0700114 sch->q.qlen = dat->q.qlen + q->q.qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115 return skb;
116}
117
Jarek Poplawski8e3af972008-10-31 00:45:55 -0700118static struct sk_buff *
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000119teql_peek(struct Qdisc *sch)
Jarek Poplawski8e3af972008-10-31 00:45:55 -0700120{
121 /* teql is meant to be used as root qdisc */
122 return NULL;
123}
124
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125static void
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000126teql_reset(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127{
128 struct teql_sched_data *dat = qdisc_priv(sch);
129
130 skb_queue_purge(&dat->q);
131 sch->q.qlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132}
133
134static void
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000135teql_destroy(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136{
137 struct Qdisc *q, *prev;
138 struct teql_sched_data *dat = qdisc_priv(sch);
139 struct teql_master *master = dat->m;
140
Pavel Tikhomirov3e9861a2021-04-08 18:14:31 +0300141 if (!master)
142 return;
143
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000144 prev = master->slaves;
145 if (prev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146 do {
147 q = NEXT_SLAVE(prev);
148 if (q == sch) {
149 NEXT_SLAVE(prev) = NEXT_SLAVE(q);
150 if (q == master->slaves) {
151 master->slaves = NEXT_SLAVE(q);
152 if (q == master->slaves) {
David S. Millere8a04642008-07-17 00:34:19 -0700153 struct netdev_queue *txq;
David S. Miller83874002008-07-17 00:53:03 -0700154 spinlock_t *root_lock;
David S. Millere8a04642008-07-17 00:34:19 -0700155
156 txq = netdev_get_tx_queue(master->dev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157 master->slaves = NULL;
David S. Miller83874002008-07-17 00:53:03 -0700158
John Fastabend46e5da42014-09-12 20:04:52 -0700159 root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc));
David S. Miller83874002008-07-17 00:53:03 -0700160 spin_lock_bh(root_lock);
John Fastabend46e5da42014-09-12 20:04:52 -0700161 qdisc_reset(rtnl_dereference(txq->qdisc));
David S. Miller83874002008-07-17 00:53:03 -0700162 spin_unlock_bh(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163 }
164 }
165 skb_queue_purge(&dat->q);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166 break;
167 }
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900168
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169 } while ((prev = q) != master->slaves);
170 }
171}
172
Patrick McHardy1e904742008-01-22 22:11:17 -0800173static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174{
David S. Miller5ce2d482008-07-08 17:06:30 -0700175 struct net_device *dev = qdisc_dev(sch);
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000176 struct teql_master *m = (struct teql_master *)sch->ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 struct teql_sched_data *q = qdisc_priv(sch);
178
179 if (dev->hard_header_len > m->dev->hard_header_len)
180 return -EINVAL;
181
182 if (m->dev == dev)
183 return -ELOOP;
184
185 q->m = m;
186
187 skb_queue_head_init(&q->q);
188
189 if (m->slaves) {
190 if (m->dev->flags & IFF_UP) {
Joe Perchesf64f9e72009-11-29 16:55:45 -0800191 if ((m->dev->flags & IFF_POINTOPOINT &&
192 !(dev->flags & IFF_POINTOPOINT)) ||
193 (m->dev->flags & IFF_BROADCAST &&
194 !(dev->flags & IFF_BROADCAST)) ||
195 (m->dev->flags & IFF_MULTICAST &&
196 !(dev->flags & IFF_MULTICAST)) ||
197 dev->mtu < m->dev->mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 return -EINVAL;
199 } else {
200 if (!(dev->flags&IFF_POINTOPOINT))
201 m->dev->flags &= ~IFF_POINTOPOINT;
202 if (!(dev->flags&IFF_BROADCAST))
203 m->dev->flags &= ~IFF_BROADCAST;
204 if (!(dev->flags&IFF_MULTICAST))
205 m->dev->flags &= ~IFF_MULTICAST;
206 if (dev->mtu < m->dev->mtu)
207 m->dev->mtu = dev->mtu;
208 }
209 q->next = NEXT_SLAVE(m->slaves);
210 NEXT_SLAVE(m->slaves) = sch;
211 } else {
212 q->next = sch;
213 m->slaves = sch;
214 m->dev->mtu = dev->mtu;
215 m->dev->flags = (m->dev->flags&~FMASK)|(dev->flags&FMASK);
216 }
217 return 0;
218}
219
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220
221static int
Eric Dumazetf7e57042011-11-30 04:08:58 +0000222__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
223 struct net_device *dev, struct netdev_queue *txq,
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700224 struct dst_entry *dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225{
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700226 struct neighbour *n;
227 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700229 n = dst_neigh_lookup_skb(dst, skb);
230 if (!n)
231 return -ENOENT;
232
233 if (dst->dev != dev) {
234 struct neighbour *mn;
235
236 mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
237 neigh_release(n);
238 if (IS_ERR(mn))
239 return PTR_ERR(mn);
240 n = mn;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241 }
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700242
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243 if (neigh_event_send(n, skb_res) == 0) {
244 int err;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +0000245 char haddr[MAX_ADDR_LEN];
Stephen Hemminger0c4e8582007-10-09 01:36:32 -0700246
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +0000247 neigh_ha_snapshot(haddr, n, dev);
Jiri Pirkod8b96052015-01-13 17:13:43 +0100248 err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
249 haddr, NULL, skb->len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -0700250
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700251 if (err < 0)
252 err = -EINVAL;
253 } else {
254 err = (skb_res == NULL) ? -EAGAIN : 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255 }
256 neigh_release(n);
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700257 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258}
259
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700260static inline int teql_resolve(struct sk_buff *skb,
Eric Dumazetf7e57042011-11-30 04:08:58 +0000261 struct sk_buff *skb_res,
262 struct net_device *dev,
263 struct netdev_queue *txq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264{
Eric Dumazetf7e57042011-11-30 04:08:58 +0000265 struct dst_entry *dst = skb_dst(skb);
Eric Dumazetf7e57042011-11-30 04:08:58 +0000266 int res;
267
John Fastabend46e5da42014-09-12 20:04:52 -0700268 if (rcu_access_pointer(txq->qdisc) == &noop_qdisc)
Evgeniy Polyakov4f9f8312007-11-06 03:08:09 -0800269 return -ENODEV;
270
Eric Dumazetf7e57042011-11-30 04:08:58 +0000271 if (!dev->header_ops || !dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 return 0;
Eric Dumazetf7e57042011-11-30 04:08:58 +0000273
274 rcu_read_lock();
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700275 res = __teql_resolve(skb, skb_res, dev, txq, dst);
Eric Dumazetf7e57042011-11-30 04:08:58 +0000276 rcu_read_unlock();
277
278 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700279}
280
Stephen Hemminger6fef4c02009-08-31 19:50:41 +0000281static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282{
Patrick McHardy2941a482006-01-08 22:05:26 -0800283 struct teql_master *master = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 struct Qdisc *start, *q;
285 int busy;
286 int nores;
Pavel Emelyanov4e3ab472007-10-21 17:01:29 -0700287 int subq = skb_get_queue_mapping(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288 struct sk_buff *skb_res = NULL;
289
290 start = master->slaves;
291
292restart:
293 nores = 0;
294 busy = 0;
295
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000296 q = start;
297 if (!q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298 goto drop;
299
300 do {
David S. Miller5ce2d482008-07-08 17:06:30 -0700301 struct net_device *slave = qdisc_dev(q);
Stephen Hemminger61294e22009-01-06 10:45:57 -0800302 struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900303
David S. Millere8a04642008-07-17 00:34:19 -0700304 if (slave_txq->qdisc_sleeping != q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305 continue;
Tom Herbert734664982011-11-28 16:32:44 +0000306 if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -0700307 !netif_running(slave)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308 busy = 1;
309 continue;
310 }
311
Eric Dumazetf7e57042011-11-30 04:08:58 +0000312 switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313 case 0:
David S. Millerc3f26a22008-07-31 16:58:50 -0700314 if (__netif_tx_trylock(slave_txq)) {
Eric Dumazetc0f84d02009-05-18 15:12:31 -0700315 unsigned int length = qdisc_pkt_len(skb);
316
Tom Herbert734664982011-11-28 16:32:44 +0000317 if (!netif_xmit_frozen_or_stopped(slave_txq) &&
David S. Millerfa2dbdc2014-08-29 21:55:22 -0700318 netdev_start_xmit(skb, slave, slave_txq, false) ==
319 NETDEV_TX_OK) {
David S. Millerc3f26a22008-07-31 16:58:50 -0700320 __netif_tx_unlock(slave_txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 master->slaves = NEXT_SLAVE(q);
322 netif_wake_queue(dev);
Eric Dumazet1ac9ad12011-01-12 12:13:14 +0000323 master->tx_packets++;
324 master->tx_bytes += length;
Patrick McHardy6ed10652009-06-23 06:03:08 +0000325 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326 }
David S. Millerc3f26a22008-07-31 16:58:50 -0700327 __netif_tx_unlock(slave_txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328 }
Tom Herbert734664982011-11-28 16:32:44 +0000329 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 busy = 1;
331 break;
332 case 1:
333 master->slaves = NEXT_SLAVE(q);
Patrick McHardy6ed10652009-06-23 06:03:08 +0000334 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 default:
336 nores = 1;
337 break;
338 }
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -0300339 __skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 } while ((q = NEXT_SLAVE(q)) != start);
341
342 if (nores && skb_res == NULL) {
343 skb_res = skb;
344 goto restart;
345 }
346
347 if (busy) {
348 netif_stop_queue(dev);
Patrick McHardy5b548142009-06-12 06:22:29 +0000349 return NETDEV_TX_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 }
Eric Dumazet1ac9ad12011-01-12 12:13:14 +0000351 master->tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352
353drop:
Eric Dumazet1ac9ad12011-01-12 12:13:14 +0000354 master->tx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355 dev_kfree_skb(skb);
Patrick McHardy6ed10652009-06-23 06:03:08 +0000356 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357}
358
359static int teql_master_open(struct net_device *dev)
360{
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000361 struct Qdisc *q;
Patrick McHardy2941a482006-01-08 22:05:26 -0800362 struct teql_master *m = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363 int mtu = 0xFFFE;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000364 unsigned int flags = IFF_NOARP | IFF_MULTICAST;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365
366 if (m->slaves == NULL)
367 return -EUNATCH;
368
369 flags = FMASK;
370
371 q = m->slaves;
372 do {
David S. Miller5ce2d482008-07-08 17:06:30 -0700373 struct net_device *slave = qdisc_dev(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374
375 if (slave == NULL)
376 return -EUNATCH;
377
378 if (slave->mtu < mtu)
379 mtu = slave->mtu;
380 if (slave->hard_header_len > LL_MAX_HEADER)
381 return -EINVAL;
382
383 /* If all the slaves are BROADCAST, master is BROADCAST
384 If all the slaves are PtP, master is PtP
385 Otherwise, master is NBMA.
386 */
387 if (!(slave->flags&IFF_POINTOPOINT))
388 flags &= ~IFF_POINTOPOINT;
389 if (!(slave->flags&IFF_BROADCAST))
390 flags &= ~IFF_BROADCAST;
391 if (!(slave->flags&IFF_MULTICAST))
392 flags &= ~IFF_MULTICAST;
393 } while ((q = NEXT_SLAVE(q)) != m->slaves);
394
395 m->dev->mtu = mtu;
396 m->dev->flags = (m->dev->flags&~FMASK) | flags;
397 netif_start_queue(m->dev);
398 return 0;
399}
400
401static int teql_master_close(struct net_device *dev)
402{
403 netif_stop_queue(dev);
404 return 0;
405}
406
stephen hemmingerbc1f4472017-01-06 19:12:52 -0800407static void teql_master_stats64(struct net_device *dev,
408 struct rtnl_link_stats64 *stats)
Eric Dumazet1ac9ad12011-01-12 12:13:14 +0000409{
410 struct teql_master *m = netdev_priv(dev);
411
412 stats->tx_packets = m->tx_packets;
413 stats->tx_bytes = m->tx_bytes;
414 stats->tx_errors = m->tx_errors;
415 stats->tx_dropped = m->tx_dropped;
Eric Dumazet1ac9ad12011-01-12 12:13:14 +0000416}
417
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418static int teql_master_mtu(struct net_device *dev, int new_mtu)
419{
Patrick McHardy2941a482006-01-08 22:05:26 -0800420 struct teql_master *m = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 struct Qdisc *q;
422
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 q = m->slaves;
424 if (q) {
425 do {
David S. Miller5ce2d482008-07-08 17:06:30 -0700426 if (new_mtu > qdisc_dev(q)->mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700427 return -EINVAL;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000428 } while ((q = NEXT_SLAVE(q)) != m->slaves);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 }
430
431 dev->mtu = new_mtu;
432 return 0;
433}
434
Stephen Hemminger61294e22009-01-06 10:45:57 -0800435static const struct net_device_ops teql_netdev_ops = {
436 .ndo_open = teql_master_open,
437 .ndo_stop = teql_master_close,
438 .ndo_start_xmit = teql_master_xmit,
Eric Dumazet1ac9ad12011-01-12 12:13:14 +0000439 .ndo_get_stats64 = teql_master_stats64,
Stephen Hemminger61294e22009-01-06 10:45:57 -0800440 .ndo_change_mtu = teql_master_mtu,
441};
442
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443static __init void teql_master_setup(struct net_device *dev)
444{
Patrick McHardy2941a482006-01-08 22:05:26 -0800445 struct teql_master *master = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446 struct Qdisc_ops *ops = &master->qops;
447
448 master->dev = dev;
449 ops->priv_size = sizeof(struct teql_sched_data);
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900450
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451 ops->enqueue = teql_enqueue;
452 ops->dequeue = teql_dequeue;
Jarek Poplawski8e3af972008-10-31 00:45:55 -0700453 ops->peek = teql_peek;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454 ops->init = teql_qdisc_init;
455 ops->reset = teql_reset;
456 ops->destroy = teql_destroy;
457 ops->owner = THIS_MODULE;
458
Stephen Hemminger61294e22009-01-06 10:45:57 -0800459 dev->netdev_ops = &teql_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700460 dev->type = ARPHRD_VOID;
461 dev->mtu = 1500;
Jarod Wilson91572082016-10-20 13:55:20 -0400462 dev->min_mtu = 68;
463 dev->max_mtu = 65535;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700464 dev->tx_queue_len = 100;
465 dev->flags = IFF_NOARP;
466 dev->hard_header_len = LL_MAX_HEADER;
Eric Dumazet02875872014-10-05 18:38:35 -0700467 netif_keep_dst(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468}
469
470static LIST_HEAD(master_dev_list);
471static int max_equalizers = 1;
472module_param(max_equalizers, int, 0);
473MODULE_PARM_DESC(max_equalizers, "Max number of link equalizers");
474
475static int __init teql_init(void)
476{
477 int i;
478 int err = -ENODEV;
479
480 for (i = 0; i < max_equalizers; i++) {
481 struct net_device *dev;
482 struct teql_master *master;
483
Tom Gundersenc835a672014-07-14 16:37:24 +0200484 dev = alloc_netdev(sizeof(struct teql_master), "teql%d",
485 NET_NAME_UNKNOWN, teql_master_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486 if (!dev) {
487 err = -ENOMEM;
488 break;
489 }
490
491 if ((err = register_netdev(dev))) {
492 free_netdev(dev);
493 break;
494 }
495
Patrick McHardy2941a482006-01-08 22:05:26 -0800496 master = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497
498 strlcpy(master->qops.id, dev->name, IFNAMSIZ);
499 err = register_qdisc(&master->qops);
500
501 if (err) {
502 unregister_netdev(dev);
503 free_netdev(dev);
504 break;
505 }
506
507 list_add_tail(&master->master_list, &master_dev_list);
508 }
509 return i ? 0 : err;
510}
511
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900512static void __exit teql_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513{
514 struct teql_master *master, *nxt;
515
516 list_for_each_entry_safe(master, nxt, &master_dev_list, master_list) {
517
518 list_del(&master->master_list);
519
520 unregister_qdisc(&master->qops);
521 unregister_netdev(master->dev);
522 free_netdev(master->dev);
523 }
524}
525
526module_init(teql_init);
527module_exit(teql_exit);
528
529MODULE_LICENSE("GPL");