/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
25 #include "translation-table.h"
26 #include "soft-interface.h"
27 #include "hard-interface.h"
30 #include "aggregation.h"
31 #include "gateway_common.h"
32 #include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}
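
/* Worked example of the penalty arithmetic above, assuming the usual
 * TQ_MAX_VALUE of 255 and a configured hop penalty of 10: an incoming
 * tq of 255 becomes (255 * (255 - 10)) / 255 = 245, i.e. every hop
 * scales the TQ down by penalty/TQ_MAX_VALUE. */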

/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
	/* parentheses matter here: '%' and '*' share precedence in C, so
	 * "% 2*JITTER" would compute (random32() % 2) * JITTER instead of
	 * a uniform value in [0, 2*JITTER) */
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}
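
/* With the scheduling above an own OGM leaves between
 * orig_interval - JITTER and orig_interval + JITTER milliseconds from
 * now (assuming the usual JITTER of 20 ms), which keeps neighboring
 * nodes from synchronizing their broadcasts. */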

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb,
		    struct batman_if *batman_if,
		    uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (batman_if->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!batman_if->net_dev))
		goto send_skb_err;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", batman_if->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *) skb_mac_header(skb);
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = batman_if->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
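
/* Note for callers of send_skb_packet(): the skb is consumed on every
 * path - either handed to dev_queue_xmit() or freed on error - so it
 * must not be touched again after this call. */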

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	struct sk_buff *skb;

	if (batman_if->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_if->net_dev->name, batman_if->net_dev->dev_addr);

		/* OGMs in an aggregate are packed back to back, each
		 * followed by its list of announced HNA addresses */
		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, batman_if, broadcast_addr);
}
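
/* Illustration of the direct_link_flags bitmap used above: bit n set
 * means the n-th OGM aggregated into forw_packet->skb was received
 * over a direct link, so DIRECTLINK is only re-set when the aggregate
 * leaves via the interface it came in on. */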

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->skb->data);
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		return;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		return;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is free'd */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, batman_if);
	}
	rcu_read_unlock();
}

static void rebuild_batman_packet(struct bat_priv *bat_priv,
				  struct batman_if *batman_if)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) +
			(bat_priv->num_local_hna * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, batman_if->packet_buff,
		       sizeof(struct batman_packet));
		batman_packet = (struct batman_packet *)new_buff;

		batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
				new_buff + sizeof(struct batman_packet),
				new_len - sizeof(struct batman_packet));

		kfree(batman_if->packet_buff);
		batman_if->packet_buff = new_buff;
		batman_if->packet_len = new_len;
	}
}
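
/* Resulting packet_buff layout after a rebuild (sizes as used above):
 * one struct batman_packet header immediately followed by
 * num_local_hna announced addresses of ETH_ALEN bytes each. */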

void schedule_own_packet(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((batman_if->if_status == IF_NOT_IN_USE) ||
	    (batman_if->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
		batman_if->if_status = IF_ACTIVE;

	/* if local hna has changed and interface is a primary interface */
	if ((atomic_read(&bat_priv->hna_local_changed)) &&
	    (batman_if == bat_priv->primary_if))
		rebuild_batman_packet(bat_priv, batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno =
		htonl((uint32_t)atomic_read(&batman_if->seqno));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	if ((batman_if == bat_priv->primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_packet->gw_flags =
			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_packet->gw_flags = 0;

	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       batman_if->packet_buff,
			       batman_if->packet_len,
			       batman_if, 1, send_time);
}

void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the
	 * rebroadcast of our best tq value */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl =
					orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htonl(batman_packet->seqno);

	/* switch off primaries first hop flag when forwarding */
	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}
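
/* In both scheduling paths above the final arguments of
 * add_bat_packet_to_list() are the "own packet" flag (1 for our own
 * OGMs, 0 for forwarded ones) and the jittered send time computed by
 * own_send_time()/forward_send_time(). */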

static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

#define atomic_dec_not_zero(v)		atomic_add_unless((v), -1, 0)
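
/* atomic_add_unless() only applies the -1 when the counter is not
 * already 0 and returns non-zero if it did, so the macro doubles as
 * both the decrement and the "queue slot available" test below. */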

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	/* re-credit the queue slot on every error path after this point */
	if (!bat_priv->primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	/* copy the skb so the original stays owned by the caller */
	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)skb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(skb);

	forw_packet->skb = skb;
	forw_packet->if_incoming = bat_priv->primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	return NETDEV_TX_BUSY;
}

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, batman_if, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
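
/* Each queued broadcast is thus transmitted up to 3 times per
 * interface, a few milliseconds apart (the requeue delay above),
 * trading a little extra airtime for a much better chance that at
 * least one copy gets through on lossy links. */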

void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
			       struct batman_if *batman_if)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;

	if (batman_if)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			batman_if->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((batman_if) &&
		    (forw_packet->if_incoming != batman_if))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}