/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef NET_FIREWALL_CT_H
#define NET_FIREWALL_CT_H

#include "netfirewall_utils.h"
#include "netfirewall_ct_def.h"
#include "netfirewall_ct_map.h"

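/* Kernel-style READ_ONCE/WRITE_ONCE: force the compiler to emit exactly one
 * volatile load/store and not cache or reorder the access. */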
#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x)) = (v)

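/**
 * @brief Clear the TCP flags seen so far in both directions of a CT entry.
 */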
static __always_inline void reset_seen_flags(struct ct_entry *entry)
{
    entry->rx_seen_flag = 0;
    entry->tx_seen_flag = 0;
}

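/**
 * @brief Clear the closing flags for both directions of a CT entry.
 */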
static __always_inline void reset_closing_flags(struct ct_entry *entry)
{
    entry->rx_closing_flag = 0;
    entry->tx_closing_flag = 0;
}

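/**
 * @brief Check whether the connection is still alive, i.e. at least one
 *        direction has not been marked as closing.
 */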
static __always_inline bool is_conn_alive(const struct ct_entry *entry)
{
    return !entry->rx_closing_flag || !entry->tx_closing_flag;
}

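/**
 * @brief Check whether the connection is closing in at least one direction.
 */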
static __always_inline bool is_conn_closing(const struct ct_entry *entry)
{
    return entry->tx_closing_flag || entry->rx_closing_flag;
}

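/**
 * @brief Map TCP flags to a connection tracking action: RST/FIN close the
 *        connection, SYN creates one, anything else leaves it unchanged.
 */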
static __always_inline enum ct_action get_tcp_conn_action(union tcp_flags flags)
{
    if (flags.value & (TCP_FLAG_RST | TCP_FLAG_FIN)) {
        return CT_ACTION_CLOSE;
    }

    if (flags.value & TCP_FLAG_SYN) {
        return CT_ACTION_CREATE;
    }

    return CT_ACTION_UNSPEC;
}

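/**
 * @brief Check whether a SYN has been seen in both directions of the entry.
 */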
static __always_inline bool is_seen_both_syns(const struct ct_entry *entry)
{
    bool rx_syn = entry->rx_seen_flag & TCP_FLAG_SYN;
    bool tx_syn = entry->tx_seen_flag & TCP_FLAG_SYN;

    return rx_syn && tx_syn;
}

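/**
 * @brief Refresh the entry lifetime and record the TCP flags seen in the
 *        given direction.
 *
 * @param entry struct ct_entry
 * @param lifetime new lifetime in seconds, relative to now
 * @param dir enum ct_dir
 * @param flags union tcp_flags
 * @return true if the seen flags or the last report timestamp for this
 *         direction were updated, otherwise false
 */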
static __always_inline bool update_timeout_inner(struct ct_entry *entry, __u32 lifetime, enum ct_dir dir,
    union tcp_flags flags)
{
    __u32 now = bpf_ktime_get_ns() / NS_PER_SEC;
    __u8 last_seen_flags;
    __u8 seen_flags = flags.lower_bits & REPORT_FLAGS;
    __u32 last_report;

    WRITE_ONCE(entry->lifetime, now + lifetime);

    if (dir == CT_INGRESS) {
        last_seen_flags = READ_ONCE(entry->rx_seen_flag);
        last_report = READ_ONCE(entry->last_rx_report);
    } else {
        last_seen_flags = READ_ONCE(entry->tx_seen_flag);
        last_report = READ_ONCE(entry->last_tx_report);
    }
    seen_flags |= last_seen_flags;
    if ((last_report + REPORT_INTERVAL_SEC) < now || last_seen_flags != seen_flags) {
        if (dir == CT_INGRESS) {
            WRITE_ONCE(entry->rx_seen_flag, seen_flags);
            WRITE_ONCE(entry->last_rx_report, now);
        } else {
            WRITE_ONCE(entry->tx_seen_flag, seen_flags);
            WRITE_ONCE(entry->last_tx_report, now);
        }
        return true;
    }
    return false;
}

/**
 * @brief Update the CT timeouts for the specified entry.
 *
 * @param entry struct ct_entry
 * @param tcp true if the connection is TCP
 * @param dir enum ct_dir
 * @param seen_flags union tcp_flags
 * @return true if the seen flags changed or REPORT_INTERVAL_SEC has elapsed since the last report,
 *         in which case the report timestamp is refreshed; otherwise false.
 */
static __always_inline bool ct_update_timeout(struct ct_entry *entry, bool tcp, enum ct_dir dir,
    union tcp_flags seen_flags)
{
    __u32 timeout = NONTCP_CONN_TIMEOUT_SEC;
    bool syn = seen_flags.value & TCP_FLAG_SYN;

    if (tcp) {
        entry->seen_non_syn |= !syn;
        if (entry->seen_non_syn) {
            timeout = TCP_CONN_TIMEOUT_SEC;
        } else {
            timeout = TCP_SYN_TIMEOUT_SEC;
        }
    }

    return update_timeout_inner(entry, timeout, dir, seen_flags);
}

/**
 * @brief Create a ct_tuple/ct_entry key-value pair and add it to the CT map.
 *
 * @param tuple struct ct_tuple
 * @param skb struct __sk_buff
 * @param dir enum ct_dir
 * @return true on success, false if an error occurred
 */
static __always_inline bool ct_create_entry(struct ct_tuple *tuple, struct __sk_buff *skb, const enum ct_dir dir)
{
    struct ct_entry entry = { 0 };
    bool is_tcp = (tuple->protocol == IPPROTO_TCP);
    union tcp_flags seen_flags = {
        .value = 0
    };

    seen_flags.value |= is_tcp ? TCP_FLAG_SYN : 0;
    ct_update_timeout(&entry, is_tcp, dir, seen_flags);

    return bpf_map_update_elem(&CT_MAP, tuple, &entry, 0) == 0;
}

/**
 * @brief Look up the CT map by ct_tuple; if an entry is found, update the connection lifetime.
 *
 * @param skb struct __sk_buff
 * @param tuple struct ct_tuple
 * @param dir enum ct_dir
 * @return CT_NEW if not found; otherwise CT_RELATED, CT_REOPENED or CT_ESTABLISHED
 */
static __always_inline enum ct_status ct_lookup_entry(struct __sk_buff *skb, const struct ct_tuple *tuple,
    enum ct_dir dir)
{
    struct ct_entry *entry = bpf_map_lookup_elem(&CT_MAP, tuple);
    if (entry) {
        __u32 l3_nhoff = get_l3_nhoff(skb);
        bool is_tcp = is_l4_protocol(skb, l3_nhoff, IPPROTO_TCP);
        union tcp_flags seen_flags = {};
        if (is_tcp) {
            __u32 l4_nhoff = get_l4_nhoff(skb);
            if (load_tcp_flags(skb, l4_nhoff, &seen_flags) < 0) {
                return CT_RELATED;
            }
        }
        if (is_conn_alive(entry)) {
            ct_update_timeout(entry, is_tcp, dir, seen_flags);
        }
        enum ct_action action = get_tcp_conn_action(seen_flags);
        switch (action) {
            case CT_ACTION_CREATE:
                // A new SYN on a closing connection reopens it with fresh state
                if (is_conn_closing(entry)) {
                    reset_closing_flags(entry);
                    reset_seen_flags(entry);
                    entry->seen_non_syn = false;
                    ct_update_timeout(entry, is_tcp, dir, seen_flags);
                    return CT_REOPENED;
                }
                break;
            case CT_ACTION_CLOSE:
                // An RST before both SYNs were seen closes both directions at once
                if (!is_seen_both_syns(entry) && (seen_flags.value & TCP_FLAG_RST)) {
                    entry->rx_closing_flag = 1;
                    entry->tx_closing_flag = 1;
                } else if (dir == CT_INGRESS) {
                    entry->rx_closing_flag = 1;
                } else {
                    entry->tx_closing_flag = 1;
                }

                if (is_conn_alive(entry)) {
                    break;
                }
                // Both directions closed: drop the entry from the CT map
                bpf_map_delete_elem(&CT_MAP, tuple);
                break;
            default:
                break;
        }
        return CT_ESTABLISHED;
    }

    return CT_NEW;
}

#endif // NET_FIREWALL_CT_H