/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef NET_FIREWALL_H
#define NET_FIREWALL_H

#include <linux/bpf.h>

#include "netfirewall_def.h"
#include "netfirewall_match.h"
#include "netfirewall_ct.h"
#include "netfirewall_event.h"

/**
 * @brief If a TCP socket is intercepted, send a reset packet to the peer
 *
 * @param tuple match tuple of skb meta data
 * @param skb struct __sk_buff
 * @param dir enum stream_dir
 * @return 0 if no error, -1 if an error occurred
 */
static __always_inline int send_sock_tcp_reset(struct match_tuple *tuple, struct __sk_buff *skb, enum stream_dir dir)
{
    if (!skb || !tuple) {
        return -1;
    }
    if (tuple->protocol == IPPROTO_TCP) {
        if (dir == INGRESS) {
            bpf_sock_tcp_send_reset(skb);
        }
        return bpf_sock_destroy(skb);
    }
    return -1;
}

/**
 * @brief Get the RST flag of the packet from the match tuple
 *
 * @param tuple struct match_tuple
 * @return true if the RST flag is set, false otherwise
 */
static __always_inline bool get_packet_rst_flag(struct match_tuple *tuple)
{
    if (!tuple) {
        return false;
    }

    if (tuple->rst == 1) {
        return true;
    }

    return false;
}

/**
 * @brief Fill the conntrack (ct) tuple from the match tuple
 *
 * @param match_tpl struct match_tuple
 * @param ct_tpl struct ct_tuple
 * @return true on success, false if an error occurred
 */
static __always_inline bool get_ct_tuple(struct match_tuple *match_tpl, struct ct_tuple *ct_tpl)
{
    if (!match_tpl || !ct_tpl) {
        return false;
    }

    ct_tpl->family = match_tpl->family;
    ct_tpl->protocol = match_tpl->protocol;
    ct_tpl->sport = match_tpl->sport;
    ct_tpl->dport = match_tpl->dport;

    if (match_tpl->family == AF_INET) {
        ct_tpl->ipv4.saddr = match_tpl->ipv4.saddr;
        ct_tpl->ipv4.daddr = match_tpl->ipv4.daddr;
    } else {
        ct_tpl->ipv6.saddr = match_tpl->ipv6.saddr;
        ct_tpl->ipv6.daddr = match_tpl->ipv6.daddr;
    }

    return true;
}

/**
 * @brief Determine whether an ingress packet should be dropped
 *
 * @param skb struct __sk_buff
 * @return SK_DROP if the packet is intercepted, SK_PASS otherwise
 */
static __always_inline enum sk_action netfirewall_policy_ingress(struct __sk_buff *skb)
{
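    /* Parse the skb headers into a match tuple; traffic that cannot be parsed is passed through. */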
    struct match_tuple tuple = { 0 };
    if (!get_match_tuple(skb, &tuple, INGRESS)) {
        return SK_PASS;
    }

    log_tuple(&tuple);

    struct ct_tuple ct_tpl = {};
    if (!get_ct_tuple(&tuple, &ct_tpl)) {
        return SK_PASS;
    }

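    /* Only new flows are checked against the firewall rules; packets of tracked connections pass directly. */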
    enum ct_status status = ct_lookup_entry(skb, &ct_tpl, CT_INGRESS);
    log_dbg(DBG_CT_LOOKUP, INGRESS, status);
    if (status != CT_NEW) {
        return SK_PASS;
    }

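    /* TCP RST packets only tear down an existing connection, so they are not filtered. */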
    if (get_packet_rst_flag(&tuple)) {
        return SK_PASS;
    }

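    /* Build the bitmap key of rules that match this tuple; pass if no key can be derived. */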
    struct bitmap key = { 0 };
    if (!match_action_key(&tuple, &key)) {
        return SK_PASS;
    }

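    /* Interception: report the event, reset the TCP peer and drop the packet. */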
    if (match_action(&tuple, &key) != SK_PASS) {
        log_intercept(&tuple);
        send_sock_tcp_reset(&tuple, skb, INGRESS);
        return SK_DROP;
    }

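    /* Record the allowed flow in conntrack so later packets skip rule matching. */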
    if (status == CT_NEW) {
        ct_create_entry(&ct_tpl, skb, CT_INGRESS);
    }

    return SK_PASS;
}

/**
 * @brief Determine whether an egress packet should be dropped
 *
 * @param skb struct __sk_buff
 * @return SK_DROP if the packet is intercepted, SK_PASS otherwise
 */
static __always_inline enum sk_action netfirewall_policy_egress(struct __sk_buff *skb)
{
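    /* Parse the skb headers into a match tuple; traffic that cannot be parsed is passed through. */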
    struct match_tuple tuple = { 0 };
    if (!get_match_tuple(skb, &tuple, EGRESS)) {
        return SK_PASS;
    }

    log_tuple(&tuple);

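    /* TCP RST packets only tear down an existing connection, so they are not filtered. */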
    if (get_packet_rst_flag(&tuple)) {
        return SK_PASS;
    }

    struct ct_tuple ct_tpl = {};
    if (!get_ct_tuple(&tuple, &ct_tpl)) {
        return SK_PASS;
    }

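    /* Only new flows are checked against the firewall rules; packets of tracked connections pass directly. */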
    enum ct_status status = ct_lookup_entry(skb, &ct_tpl, CT_EGRESS);
    log_dbg(DBG_CT_LOOKUP, EGRESS, status);
    if (status != CT_NEW) {
        return SK_PASS;
    }

    if (get_packet_rst_flag(&tuple)) {
        return SK_PASS;
    }

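    /* Build the bitmap key of rules that match this tuple; pass if no key can be derived. */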
    struct bitmap key = { 0 };
    if (!match_action_key(&tuple, &key)) {
        return SK_PASS;
    }

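    /* Interception: report the event, tear down the local socket and drop the packet. */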
    if (match_action(&tuple, &key) != SK_PASS) {
        log_intercept(&tuple);
        send_sock_tcp_reset(&tuple, skb, EGRESS);
        return SK_DROP;
    }

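    /* Record the allowed flow in conntrack so later packets skip rule matching. */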
    if (status == CT_NEW) {
        ct_create_entry(&ct_tpl, skb, CT_EGRESS);
    }

    return SK_PASS;
}

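/*
 * Illustrative usage (a sketch, not part of this header): the BPF object that
 * includes this file would typically wrap these helpers in cgroup_skb programs,
 * for example:
 *
 *   SEC("cgroup_skb/ingress")
 *   int netfirewall_ingress(struct __sk_buff *skb)
 *   {
 *       return (int)netfirewall_policy_ingress(skb);
 *   }
 *
 * The program name and section name above are assumptions for illustration only.
 */
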
#endif // NET_FIREWALL_H