/*
* Distributed under the terms of the GNU GPL version 2.
* Copyright (c) 2010, 2011 Nicira Networks.
*
* Significant portions of this file may be copied from parts of the Linux
* kernel, by Linus Torvalds and others.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include "loop_counter.h"
/* Called when a packet's actions have looped more than MAX_LOOPS times.
 * Emits a rate-limited warning, discards the remaining actions so that
 * processing stops, and returns -ELOOP for the caller to propagate.
 */
int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	/* Truncating the action list is what actually breaks the loop. */
	actions->actions_len = 0;

	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			dp_name(dp), MAX_LOOPS);

	return -ELOOP;
}
#ifndef CONFIG_PREEMPT_RT
/* We use a separate counter for each CPU for both interrupt and non-interrupt
 * context in order to keep the limit deterministic for a given packet.
 */
struct percpu_loop_counters {
	/* [0] = process context, [1] = interrupt context; indexed by
	 * !!in_interrupt() in loop_get_counter(). */
	struct loop_counter counters[2];
};

/* One pair of counters per CPU; accessed via get_cpu_var()/put_cpu_var(). */
static DEFINE_PER_CPU(struct percpu_loop_counters, loop_counters);
struct loop_counter *loop_get_counter(void)
{
return &get_cpu_var(loop_counters).counters[!!in_interrupt()];
}
/* Release the counter obtained by loop_get_counter(), re-enabling
 * preemption via put_cpu_var().
 */
void loop_put_counter(void)
{
	put_cpu_var(loop_counters);
}
#else /* !CONFIG_PREEMPT_RT */
/* Return the loop counter stashed in the current task.
 *
 * Fix: the original text contained "¤t" — an HTML-entity mangling of
 * "&current" (from "&curren;t") — which is not valid C.  Restored to
 * &current->extra_flags.
 */
struct loop_counter *loop_get_counter(void)
{
	WARN_ON(in_interrupt());

	/* Only two bits of the extra_flags field in struct task_struct are
	 * used and it's an unsigned int.  We hijack the most significant bits
	 * to be our counter structure.  On RT kernels softirqs always run in
	 * process context so we are guaranteed to have a valid task_struct.
	 */
#ifdef __LITTLE_ENDIAN
	/* Counter occupies the high-order bytes: step one word past
	 * extra_flags, then back up by the counter's size. */
	return (void *)(&current->extra_flags + 1) -
			sizeof(struct loop_counter);
#elif __BIG_ENDIAN
	/* High-order bytes come first, so the field's address works directly. */
	return (struct loop_counter *)&current->extra_flags;
#else
#error "Please fix <asm/byteorder.h>."
#endif
}
/* No-op on RT: the counter lives in the task_struct, so there is no
 * per-CPU reference to release. */
void loop_put_counter(void) { }
#endif /* CONFIG_PREEMPT_RT */
|