/*********************************************************************************************************
 * Software License Agreement (BSD License)
 * Author: Sebastien Decugis <sdecugis@nict.go.jp>
 *
 * Copyright (c) 2011, WIDE Project and NICT
 * All rights reserved.
 *
 * Redistribution and use of this software in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above
 *     copyright notice, this list of conditions and the
 *     following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the
 *     following disclaimer in the documentation and/or other
 *     materials provided with the distribution.
 *
 *   * Neither the name of the WIDE Project or NICT nor the
 *     names of its contributors may be used to endorse or
 *     promote products derived from this software without
 *     specific prior written permission of WIDE Project and
 *     NICT.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************************************************/

#include "fdcore-internal.h"

/*
   This file implements a Peer State Machine which is a mix of:
    - the state machine described in rfc3588bis
    - the state machine described in rfc3539#section-3.4
    - the following observations.

   The delivery of Diameter messages must not always be unordered: order is important at the
   beginning and at the end of a connection lifetime. This means we need the agility to
   switch between "ordering enforced" and "ordering not enforced to counter
   HotLB (head-of-line blocking)" modes of operation.

   The connection state machine represented in RFC3588 (and rfc3588bis) is
   incomplete, because it lacks the SUSPECT state and the 3 DWR/DWA
   exchanges (section 5.1) when the peer recovers from this state.
   Personally I don't see the rationale for exchanging 3 messages (why 3?),
   but if we require at least 1 DWR/DWA exchange to always be performed
   after the CER/CEA exchange (and initiated by the peer that sent the
   CEA), we have a simple way to deal with our ordering problem, as summarized
   below. Peers are: [i]nitiator, [r]esponder.
     (1) [i]   SCTP connection attempt.
     (2) [r]   accept the connection.
     (3) [i,r] (if secure port) DTLS handshake, close on failure.
     (4) [i]   Send CER
     (5) [r]   Receive CER, send CEA using stream 0, flag "unordered" cleared.
         [r]   Immediately send a DWR after the CEA, also using stream 0,
               flag "unordered" cleared.
         [r]   Move to STATE_OPEN_NEW state -- equivalent to OPEN except
               that all messages are sent ordered at the moment.
     (6) [i]   receive CEA, move to OPEN state. All messages can be sent
               unordered in OPEN state.
         [i]   As per normal operation, reply with DWA to the DWR.
     (7) [r]   Upon reception of the DWA, move to OPEN state, messages can be
               sent unordered from this point.

   Note about (5) and (6): if the Diameter Identity received in CER or CEA
   does not match the credentials from the certificate presented during
   DTLS handshake, we may need to specify a path of clean disconnection
   (not blocking the remote peer waiting for something).

   This proposed mechanism removes the problem of application messages
   being received by the initiator before the CEA. Note that if the "old" in-band
   TLS handshake is used, that handshake plays the same synchronization
   role as the new DWR/DWA exchange, which then becomes unnecessary.


   The other time when ordering is important is at the end of the connection
   lifetime, when one peer is shutting down the link for some reason
   (reboot, overload, no activity, etc.). In case of unordered delivery,
   we may have:
    - Peer A sends an application message followed by a DPR. Peer B receives
      the DPR first and tears down the connection. The application message is lost.
    - Peer B sends an application message, then receives a DPR and answers with a
      DPA. Peer A receives the DPA before the application message. The
      application message is lost.

   This situation is actually quite possible because DPR/DPA messages are
   very short, while application messages can be quite large and therefore
   take much longer to deliver.

   I really cannot see a way to counter this effect by using the ordering
   of the messages, except by applying a timer (state STATE_CLOSING_GRACE).

   However, this problem must be balanced against the fact that, in many cases,
   the lost message will be sent again, as specified by the failover mechanism.
*/

/* The actual declaration of peer_state_str */
DECLARE_STATE_STR();

/* Helper for next macro */
#define case_str( _val )	\
	case _val : return #_val

DECLARE_PEV_STR();

/************************************************************************/
/*                          Delayed startup                             */
/************************************************************************/
static int started = 0;
static pthread_mutex_t started_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  started_cnd = PTHREAD_COND_INITIALIZER;

/* Wait for start signal */
static int fd_psm_waitstart()
{
	int ret = 0;
	TRACE_ENTRY("");
	CHECK_POSIX( pthread_mutex_lock(&started_mtx) );
awake:
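	/* We re-evaluate the condition after every wakeup, so a spurious return from pthread_cond_wait() below is harmless */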
	if (!ret && !started) {
		pthread_cleanup_push( fd_cleanup_mutex, &started_mtx );
		CHECK_POSIX_DO( ret = pthread_cond_wait(&started_cnd, &started_mtx), );
		pthread_cleanup_pop( 0 );
		goto awake;
	}
	CHECK_POSIX( pthread_mutex_unlock(&started_mtx) );
	return ret;
}

/* Allow the state machines to start */
int fd_psm_start()
{
	TRACE_ENTRY("");
	CHECK_POSIX( pthread_mutex_lock(&started_mtx) );
	started = 1;
	CHECK_POSIX( pthread_cond_broadcast(&started_cnd) );
	CHECK_POSIX( pthread_mutex_unlock(&started_mtx) );
	return 0;
}
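
/* Usage sketch (illustration only, not code that runs here): PSM threads created with
 * fd_psm_begin() block in fd_psm_waitstart() until the daemon core releases them all at once.
 * Assuming 'peer_a' and 'peer_b' are hypothetical, fully configured struct fd_peer objects,
 * the expected call order is roughly:
 *
 *	CHECK_FCT( fd_psm_begin(peer_a) );	// spawns the PSM thread, which then waits
 *	CHECK_FCT( fd_psm_begin(peer_b) );
 *	// ... complete the daemon initialization ...
 *	CHECK_FCT( fd_psm_start() );		// all waiting PSM threads proceed together
 */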


/************************************************************************/
/*                  Manage the list of active peers                     */
/************************************************************************/

/* Enter/leave OPEN state */
static int enter_open_state(struct fd_peer * peer)
{
	struct fd_list * li;
	CHECK_PARAMS( FD_IS_LIST_EMPTY(&peer->p_actives) );
	
	/* Callback registered by the credential validator (fd_peer_validate_register) */
	if (peer->p_cb2) {
		CHECK_FCT_DO( (*peer->p_cb2)(&peer->p_hdr.info),
			{
				TRACE_DEBUG(FULL, "Validation failed, terminating the connection");
				fd_psm_terminate(peer, "DO_NOT_WANT_TO_TALK_TO_YOU" );
			} );
		peer->p_cb2 = NULL;
		return 0;
	}
	
	/* Insert in the active peers list */
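	/* (the list is kept ordered by Diameter Identity, see the fd_os_cmp() comparison below) */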
	CHECK_POSIX( pthread_rwlock_wrlock(&fd_g_activ_peers_rw) );
	for (li = fd_g_activ_peers.next; li != &fd_g_activ_peers; li = li->next) {
		struct fd_peer * next_p = (struct fd_peer *)li->o;
		int cmp = fd_os_cmp(peer->p_hdr.info.pi_diamid, peer->p_hdr.info.pi_diamidlen,
				    next_p->p_hdr.info.pi_diamid, next_p->p_hdr.info.pi_diamidlen);
		if (cmp < 0)
			break;
	}
	fd_list_insert_before(li, &peer->p_actives);
	CHECK_POSIX( pthread_rwlock_unlock(&fd_g_activ_peers_rw) );
	
	/* Callback registered when the peer was added, by fd_peer_add */
	if (peer->p_cb) {
		TRACE_DEBUG(FULL, "Calling add callback for peer %s", peer->p_hdr.info.pi_diamid);
		(*peer->p_cb)(&peer->p_hdr.info, peer->p_cb_data); /* TODO: do this in a separate detached thread? */
		peer->p_cb = NULL;
		peer->p_cb_data = NULL;
	}
	
	/* Start the thread to handle outgoing messages */
	CHECK_FCT( fd_out_start(peer) );
	
	/* Update the expiry timer now */
	CHECK_FCT( fd_p_expi_update(peer) );
	
	return 0;
}

static int leave_open_state(struct fd_peer * peer)
{
	/* Remove from active peers list */
	CHECK_POSIX( pthread_rwlock_wrlock(&fd_g_activ_peers_rw) );
	fd_list_unlink( &peer->p_actives );
	CHECK_POSIX( pthread_rwlock_unlock(&fd_g_activ_peers_rw) );
	
	/* Stop the "out" thread */
	CHECK_FCT( fd_out_stop(peer) );
	
	/* Failover the messages */
	fd_peer_failover_msg(peer);
	
	return 0;
}


/************************************************************************/
/*                     Helpers for state changes                        */
/************************************************************************/

/* Cleanup pending events in the peer */
void fd_psm_events_free(struct fd_peer * peer)
{
	struct fd_event * ev;
	/* Purge all events, and free the associated data if any */
	while (fd_fifo_tryget( peer->p_events, &ev ) == 0) {
		switch (ev->code) {
			case FDEVP_CNX_ESTABLISHED: {
				fd_cnx_destroy(ev->data);
			}
			break;
			
			case FDEVP_TERMINATE:
				/* Do not free the string since it is a constant */
				break;
			
			case FDEVP_CNX_INCOMING: {
				struct cnx_incoming * evd = ev->data;
				fd_msg_log( FD_MSG_LOG_DROPPED, evd->cer, "Message discarded while cleaning peer state machine queue." );
				CHECK_FCT_DO( fd_msg_free(evd->cer), /* continue */);
				fd_cnx_destroy(evd->cnx);
			}
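			/* no break here: fall through so that the cnx_incoming structure itself is freed by the default case */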
			default:
				free(ev->data);
		}
		free(ev);
	}
}

/* Read state */
int fd_peer_get_state(struct peer_hdr *peer)
{
	int ret;
	
	struct fd_peer * p = (struct fd_peer *)peer;
	
	if (!CHECK_PEER(p))
		return -1;
	
	CHECK_POSIX_DO( pthread_mutex_lock(&p->p_state_mtx), return -1 );
	ret = p->p_state;
	CHECK_POSIX_DO( pthread_mutex_unlock(&p->p_state_mtx), return -1 );
	
	return ret;
}
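
/* Example (sketch only): code holding a struct peer_hdr pointer 'p' (hypothetical variable)
 * could log the peer's current state like this:
 *
 *	int s = fd_peer_get_state(p);
 *	if (s >= 0)
 *		TRACE_DEBUG(INFO, "Peer is currently in state %s", STATE_STR(s));
 */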


/* Change state */
int fd_psm_change_state(struct fd_peer * peer, int new_state)
{
	int old;
	
	TRACE_ENTRY("%p %d(%s)", peer, new_state, STATE_STR(new_state));
	CHECK_PARAMS( CHECK_PEER(peer) );
	
	old = fd_peer_getstate(peer);
	if (old == new_state)
		return 0;
	
	TRACE_DEBUG(((old == STATE_OPEN) || (new_state == STATE_OPEN)) ? INFO : FULL, "'%s'\t-> '%s'\t'%s'",
			STATE_STR(old),
			STATE_STR(new_state),
			peer->p_hdr.info.pi_diamid);
	
	
	CHECK_POSIX( pthread_mutex_lock(&peer->p_state_mtx) );
	peer->p_state = new_state;
	CHECK_POSIX( pthread_mutex_unlock(&peer->p_state_mtx) );
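	/* Note: the enter/leave handlers below are invoked after p_state_mtx has been released */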
	
	if (old == STATE_OPEN) {
		CHECK_FCT( leave_open_state(peer) );
	}
	
	if (new_state == STATE_OPEN) {
		CHECK_FCT( enter_open_state(peer) );
	}
	
	if (new_state == STATE_CLOSED) {
		/* Purge event list */
		fd_psm_events_free(peer);
		
		/* If the peer is not persistent, we destroy it */
		if (peer->p_hdr.info.config.pic_flags.persist == PI_PRST_NONE) {
			CHECK_FCT( fd_event_send(peer->p_events, FDEVP_TERMINATE, 0, NULL) );
		}
	}
	
	return 0;
}

/* Set the timeout timer for the next event */
void fd_psm_next_timeout(struct fd_peer * peer, int add_random, int delay)
{
	TRACE_DEBUG(FULL, "Peer timeout reset to %d seconds%s", delay, add_random ? " (+/- 2)" : "" );
	
	/* Initialize the timer */
	CHECK_POSIX_DO( clock_gettime( CLOCK_REALTIME, &peer->p_psm_timer ), ASSERT(0) );
	
	if (add_random) {
		if (delay > 2)
			delay -= 2;
		else
			delay = 0;
		
		/* Add a random value between 0 and 4 sec */
		peer->p_psm_timer.tv_sec += random() % 4;
		peer->p_psm_timer.tv_nsec+= random() % 1000000000L;
		if (peer->p_psm_timer.tv_nsec >= 1000000000L) {
			peer->p_psm_timer.tv_nsec -= 1000000000L;
			peer->p_psm_timer.tv_sec ++;
		}
	}
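	/* net effect with add_random: the timeout expires roughly within [delay - 2, delay + 2] seconds from now */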
	
	peer->p_psm_timer.tv_sec += delay;
	
#ifdef SLOW_PSM
	/* temporary for debug */
	peer->p_psm_timer.tv_sec += 10;
#endif
}

/* Cleanup the peer */
void fd_psm_cleanup(struct fd_peer * peer, int terminate)
{
	/* Move to CLOSED state: failover messages, stop OUT thread, unlink peer from active list */
	if (fd_peer_getstate(peer) != STATE_ZOMBIE) {
		CHECK_FCT_DO( fd_psm_change_state(peer, STATE_CLOSED), /* continue */ );
	}
	
	fd_p_cnx_abort(peer, terminate);
	
	fd_p_ce_clear_cnx(peer, NULL);
	
	if (peer->p_receiver) {
		fd_cnx_destroy(peer->p_receiver);
		peer->p_receiver = NULL;
	}
	
	if (terminate) {
		fd_psm_events_free(peer);
		CHECK_FCT_DO( fd_fifo_del(&peer->p_events), /* continue */ );
	}
	
}


/************************************************************************/
/*                          The PSM thread                              */
/************************************************************************/
/* Cancellation cleanup : set ZOMBIE state in the peer */
void cleanup_setstate(void * arg)
{
	struct fd_peer * peer = (struct fd_peer *)arg;
	CHECK_PARAMS_DO( CHECK_PEER(peer), return );
	CHECK_POSIX_DO( pthread_mutex_lock(&peer->p_state_mtx), );
	peer->p_state = STATE_ZOMBIE;
	CHECK_POSIX_DO( pthread_mutex_unlock(&peer->p_state_mtx), );
	return;
}

/* The state machine thread (controller) */
static void * p_psm_th( void * arg )
{
	struct fd_peer * peer = (struct fd_peer *)arg;
	int created_started = started ? 1 : 0;
	int event;
	size_t ev_sz;
	void * ev_data;
	int cur_state;
	
	CHECK_PARAMS_DO( CHECK_PEER(peer), ASSERT(0) );
	
	pthread_cleanup_push( cleanup_setstate, arg );
	
	/* Set the thread name */
	{
		char buf[48];
		snprintf(buf, sizeof(buf), "PSM/%s", peer->p_hdr.info.pi_diamid);
		fd_log_threadname ( buf );
	}
	
	/* The state machine starts in CLOSED state */
	CHECK_POSIX_DO( pthread_mutex_lock(&peer->p_state_mtx), goto psm_end );
	peer->p_state = STATE_CLOSED;
	CHECK_POSIX_DO( pthread_mutex_unlock(&peer->p_state_mtx), goto psm_end );
	
	/* Wait until the PSMs are authorized to start in the daemon */
	CHECK_FCT_DO( fd_psm_waitstart(), goto psm_end );
	
	/* Initialize the timer */
	if (peer->p_flags.pf_responder) {
		fd_psm_next_timeout(peer, 0, INCNX_TIMEOUT);
	} else {
		fd_psm_next_timeout(peer, created_started, 0);
	}
	
psm_loop:
	/* Get next event */
	TRACE_DEBUG(FULL, "'%s' in state '%s' waiting for next event.",
			peer->p_hdr.info.pi_diamid, STATE_STR(fd_peer_getstate(peer)));
	CHECK_FCT_DO( fd_event_timedget(peer->p_events, &peer->p_psm_timer, FDEVP_PSM_TIMEOUT, &event, &ev_sz, &ev_data), goto psm_end );
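	/* (when p_psm_timer expires before any other event is posted, the call above reports FDEVP_PSM_TIMEOUT in 'event') */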
	
	cur_state = fd_peer_getstate(peer);
	if (cur_state == -1)
		goto psm_end;
	
	TRACE_DEBUG(FULL, "'%s'\t<-- '%s'\t(%p,%zd)\t'%s'",
			STATE_STR(cur_state),
			fd_pev_str(event), ev_data, ev_sz,
			peer->p_hdr.info.pi_diamid);
	
	/* Now, the action depends on the current state and the incoming event */
	
	/* The following states are impossible */
	ASSERT( cur_state != STATE_NEW );
	ASSERT( cur_state != STATE_ZOMBIE );
	ASSERT( cur_state != STATE_OPEN_HANDSHAKE ); /* because it should only exist between two iterations of this loop */
	
	/* Purge invalid events */
	if (!CHECK_PEVENT(event)) {
		TRACE_DEBUG(INFO, "Invalid event received in PSM '%s' : %d", peer->p_hdr.info.pi_diamid, event);
		ASSERT(0); /* we should investigate this situation */
		goto psm_loop;
	}
	
	/* Handle the (easy) debug event now */
	if (event == FDEVP_DUMP_ALL) {
		fd_peer_dump(peer, ANNOYING);
		goto psm_loop;
	}
	
	/* Requests to terminate the peer object */
	if (event == FDEVP_TERMINATE) {
		switch (cur_state) {
			case STATE_OPEN:
			case STATE_OPEN_NEW:
			case STATE_REOPEN:
				/* We cannot just close the connection, we have to send a DPR first */
				CHECK_FCT_DO( fd_p_dp_initiate(peer, ev_data), goto psm_end );
				goto psm_loop;
			
			/*
			case STATE_CLOSING:
			case STATE_CLOSING_GRACE:
			case STATE_WAITCNXACK:
			case STATE_WAITCNXACK_ELEC:
			case STATE_WAITCEA:
			case STATE_SUSPECT:
			case STATE_CLOSED:
			*/
			default:
				/* In these cases, we just cleanup the peer object (if needed) and terminate */
				goto psm_end;
		}
	}
	
	/* A message was received */
	if (event == FDEVP_CNX_MSG_RECV) {
		struct msg * msg = NULL;
		struct msg_hdr * hdr;
		
		/* Parse the received buffer */
		CHECK_FCT_DO( fd_msg_parse_buffer( (void *)&ev_data, ev_sz, &msg),
			{
				fd_log_debug("Received invalid data from peer '%s', closing the connection\n", peer->p_hdr.info.pi_diamid);
				free(ev_data);
				CHECK_FCT_DO( fd_event_send(peer->p_events, FDEVP_CNX_ERROR, 0, NULL), goto psm_reset );
				goto psm_loop;
			} );
		
		/* If the current state does not allow receiving messages, just drop it */
		if (cur_state == STATE_CLOSED) {
			/* In such case, just discard the message */
			fd_msg_log( FD_MSG_LOG_DROPPED, msg, "Purged from peer '%s''s queue (CLOSED state).", peer->p_hdr.info.pi_diamid );
			fd_msg_free(msg);
			goto psm_loop;
		}
		
		/* Log incoming message */
		fd_msg_log( FD_MSG_LOG_RECEIVED, msg, "Received %zdb from '%s' (%s)", ev_sz, peer->p_hdr.info.pi_diamid, STATE_STR(cur_state) );
		
		/* Extract the header */
		CHECK_FCT_DO( fd_msg_hdr(msg, &hdr), goto psm_end );
		
		/* If it is an answer, associate with the request or drop */
		if (!(hdr->msg_flags & CMD_FLAG_REQUEST)) {
			struct msg * req;
			/* Search matching request (same hbhid) */
			CHECK_FCT_DO( fd_p_sr_fetch(&peer->p_sr, hdr->msg_hbhid, &req), goto psm_end );
			if (req == NULL) {
				fd_msg_log( FD_MSG_LOG_DROPPED, msg, "Answer received with no corresponding sent request." );
				fd_msg_free(msg);
				goto psm_loop;
			}
			
			/* Associate */
			CHECK_FCT_DO( fd_msg_answ_associate( msg, req ), goto psm_end );
		}
		
		if (cur_state == STATE_OPEN_NEW) {
			/* OK, we have received something, so the connection is supposedly now in OPEN state on the remote side */
			fd_psm_change_state(peer, STATE_OPEN );
		}
		
		/* Now handle non-link-local messages */
		if (fd_msg_is_routable(msg)) {
			switch (cur_state) {
				/* To maximize compatibility -- should not be a security issue here */
				case STATE_REOPEN:
				case STATE_SUSPECT:
				case STATE_CLOSING:
				case STATE_CLOSING_GRACE:
					TRACE_DEBUG(FULL, "Accepted a message while not in OPEN state... ");
				/* The standard situation : */
				case STATE_OPEN_NEW:
				case STATE_OPEN:
					/* We received a valid routable message, update the expiry timer */
					CHECK_FCT_DO( fd_p_expi_update(peer), goto psm_end );
					
					/* Set the message source and add the Route-Record */
					CHECK_FCT_DO( fd_msg_source_set( msg, peer->p_hdr.info.pi_diamid, peer->p_hdr.info.pi_diamidlen, 1, fd_g_config->cnf_dict ), goto psm_end);
					
					/* Requeue to the global incoming queue */
					CHECK_FCT_DO(fd_fifo_post(fd_g_incoming, &msg), goto psm_end );
					
					/* Update the peer timer (only in OPEN state) */
					if ((cur_state == STATE_OPEN) && (!peer->p_flags.pf_dw_pending)) {
						fd_psm_next_timeout(peer, 1, peer->p_hdr.info.config.pic_twtimer ?: fd_g_config->cnf_timer_tw);
					}
					break;
				
				/* In the other states we discard the message: it is either old, or it was invalid for the remote peer to send it */
				case STATE_WAITCNXACK:
				case STATE_WAITCNXACK_ELEC:
				case STATE_WAITCEA:
				case STATE_CLOSED:
				default:
					/* In such case, just discard the message */
					fd_msg_log( FD_MSG_LOG_DROPPED, msg, "Received from peer '%s' while connection was in state %s (not open).", peer->p_hdr.info.pi_diamid, STATE_STR(cur_state) );
					fd_msg_free(msg);
			}
			goto psm_loop;
		}
		
		/* Link-local message: it must be understood by our dictionary, otherwise we return an error */
		{
			int ret = fd_msg_parse_or_error( &msg );
			if (ret != EBADMSG) {
				CHECK_FCT_DO( ret, goto psm_end );
			} else {
				if (msg) {
					/* Send the error back to the peer */
					CHECK_FCT_DO( ret = fd_out_send(&msg, NULL, peer, FD_CNX_ORDERED), );
					if (msg) {
						/* Only if an error occurred & the message was not saved / dumped */
						fd_msg_log( FD_MSG_LOG_DROPPED, msg, "Internal error: Problem while sending (%s)\n", strerror(ret) );
						CHECK_FCT_DO( fd_msg_free(msg), goto psm_end);
					}
				} else {
					/* We received an invalid answer, let's disconnect */
					CHECK_FCT_DO( fd_event_send(peer->p_events, FDEVP_CNX_ERROR, 0, NULL), goto psm_reset );
				}
				goto psm_loop;
			}
		}
		
		/* Handle the LL message and update the expiry timer appropriately */
		switch (hdr->msg_code) {
			case CC_CAPABILITIES_EXCHANGE:
				CHECK_FCT_DO( fd_p_ce_msgrcv(&msg, (hdr->msg_flags & CMD_FLAG_REQUEST), peer), goto psm_reset );
				break;
			
			case CC_DISCONNECT_PEER:
				CHECK_FCT_DO( fd_p_dp_handle(&msg, (hdr->msg_flags & CMD_FLAG_REQUEST), peer), goto psm_reset );
				if (fd_peer_getstate(peer) == STATE_CLOSING)
					goto psm_end;
				
				break;
			
			case CC_DEVICE_WATCHDOG:
				CHECK_FCT_DO( fd_p_dw_handle(&msg, (hdr->msg_flags & CMD_FLAG_REQUEST), peer), goto psm_reset );
				break;
			
			default:
				/* Unknown / unexpected / invalid message -- but validated by our dictionary */
				TRACE_DEBUG(INFO, "Invalid non-routable command received: %u.", hdr->msg_code);
				if (hdr->msg_flags & CMD_FLAG_REQUEST) {
					do {
						/* Reply with an error code */
						CHECK_FCT_DO( fd_msg_new_answer_from_req ( fd_g_config->cnf_dict, &msg, MSGFL_ANSW_ERROR ), break );
						
						/* Set the error code */
						CHECK_FCT_DO( fd_msg_rescode_set(msg, "DIAMETER_COMMAND_UNSUPPORTED", "Or maybe the P-bit or application Id are erroneous.", NULL, 1 ), break );
						
						/* Send the answer */
						CHECK_FCT_DO( fd_out_send(&msg, peer->p_cnxctx, peer, FD_CNX_ORDERED), break );
					} while (0);
				} else {
					/* We did ASK for it ??? */
					TRACE_DEBUG(INFO, "Received answer with erroneous 'is_routable' result...");
				}
				
				/* Cleanup the message if not done */
				if (msg) {
					fd_msg_log( FD_MSG_LOG_DROPPED, msg, "Received un-handled non-routable command from peer '%s'.", peer->p_hdr.info.pi_diamid );
					CHECK_FCT_DO( fd_msg_free(msg), /* continue */);
					msg = NULL;
				}
		}
		
		/* At this point the message must have been fully handled already */
		if (msg) {
			fd_msg_log( FD_MSG_LOG_DROPPED, msg, "Internal error: unhandled message (from peer '%s').", peer->p_hdr.info.pi_diamid );
			fd_msg_free(msg);
		}
		
		goto psm_loop;
	}
	
	/* The connection object is broken */
	if (event == FDEVP_CNX_ERROR) {
		switch (cur_state) {
			case STATE_WAITCNXACK_ELEC:
				/* Abort the initiating side */
				fd_p_cnx_abort(peer, 0);
				/* Process the receiver side */
				CHECK_FCT_DO( fd_p_ce_process_receiver(peer), goto psm_end );
				break;
			
			case STATE_WAITCEA:
			case STATE_OPEN:
			case STATE_OPEN_NEW:
			case STATE_REOPEN:
			case STATE_WAITCNXACK:
			case STATE_SUSPECT:
			default:
				/* Mark the connection problem */
				peer->p_flags.pf_cnx_pb = 1;
				
				/* Destroy the connection, restart the timer to a new connection attempt */
				fd_psm_next_timeout(peer, 1, peer->p_hdr.info.config.pic_tctimer ?: fd_g_config->cnf_timer_tc);
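				/* no break: fall through to the CLOSED case below and reset the PSM */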
			
			case STATE_CLOSED:
				goto psm_reset;
			
			case STATE_CLOSING:
				/* We sent a DPR so we are terminating, do not wait for DPA */
				goto psm_end;
			
			case STATE_CLOSING_GRACE:
				if (peer->p_flags.pf_localterm) /* initiated here */
					goto psm_end;
				
				fd_psm_cleanup(peer, 0);
				
				/* Reset the timer for next connection attempt */
				fd_psm_next_timeout(peer, 1, fd_p_dp_newdelay(peer));
				goto psm_loop;
		}
		goto psm_loop;
	}
	
	/* The connection notified a change in endpoints */
	if (event == FDEVP_CNX_EP_CHANGE) {
		/* We actually don't care if we are in OPEN state here... */
		
		/* Cleanup the remote LL and primary addresses */
		CHECK_FCT_DO( fd_ep_filter( &peer->p_hdr.info.pi_endpoints, EP_FL_CONF | EP_FL_DISC | EP_FL_ADV ), /* ignore the error */);
		CHECK_FCT_DO( fd_ep_clearflags( &peer->p_hdr.info.pi_endpoints, EP_FL_PRIMARY ), /* ignore the error */);
		
		/* Get the new ones */
		CHECK_FCT_DO( fd_cnx_getremoteeps(peer->p_cnxctx, &peer->p_hdr.info.pi_endpoints), /* ignore the error */);
		
		/* We do not support local endpoints change currently, but it could be added here if needed (refresh fd_g_config->cnf_endpoints) */
		
		if (TRACE_BOOL(ANNOYING)) {
			TRACE_DEBUG(ANNOYING, "New remote endpoint(s):" );
			fd_ep_dump(6, &peer->p_hdr.info.pi_endpoints);
		}
		
		/* Done */
		goto psm_loop;
	}
	
	/* A new connection was established and a CER containing this peer id was received */
	if (event == FDEVP_CNX_INCOMING) {
		struct cnx_incoming * params = ev_data;
		ASSERT(params);
		
		/* Handle the message */
		CHECK_FCT_DO( fd_p_ce_handle_newCER(&params->cer, peer, &params->cnx, params->validate), goto psm_end );
		
		/* Cleanup if needed */
		if (params->cnx) {
			fd_cnx_destroy(params->cnx);
			params->cnx = NULL;
		}
		if (params->cer) {
			fd_msg_log( FD_MSG_LOG_DROPPED, params->cer, "Internal error: this CER was not handled as expected." );
			CHECK_FCT_DO( fd_msg_free(params->cer), );
			params->cer = NULL;
		}
		
		/* Loop */
		free(ev_data);
		goto psm_loop;
	}
	
	/* A new connection has been established with the remote peer */
	if (event == FDEVP_CNX_ESTABLISHED) {
		struct cnxctx * cnx = ev_data;
		
		/* Release the resources of the connecting thread */
		CHECK_POSIX_DO( pthread_join( peer->p_ini_thr, NULL), /* ignore, it is not a big deal */);
		peer->p_ini_thr = (pthread_t)NULL;
		
		switch (cur_state) {
			case STATE_WAITCNXACK_ELEC:
			case STATE_WAITCNXACK:
				fd_p_ce_handle_newcnx(peer, cnx);
				break;
			
			default:
				/* Just abort the attempt and continue */
				TRACE_DEBUG(FULL, "Connection attempt successful but current state is %s, closing... (too slow?)", STATE_STR(cur_state));
				fd_cnx_destroy(cnx);
		}
		
		goto psm_loop;
	}
	
	/* The connection attempt to the remote peer failed */
	if (event == FDEVP_CNX_FAILED) {
		
		/* Release the resources of the connecting thread */
		CHECK_POSIX_DO( pthread_join( peer->p_ini_thr, NULL), /* ignore, it is not a big deal */);
		peer->p_ini_thr = (pthread_t)NULL;
		
		switch (cur_state) {
			case STATE_WAITCNXACK_ELEC:
				/* Abort the initiating side */
				fd_p_cnx_abort(peer, 0);
				/* Process the receiver side */
				CHECK_FCT_DO( fd_p_ce_process_receiver(peer), goto psm_end );
				break;
			
			case STATE_WAITCNXACK:
				/* Go back to the CLOSED state */
				fd_psm_next_timeout(peer, 1, peer->p_hdr.info.config.pic_tctimer ?: fd_g_config->cnf_timer_tc);
				goto psm_reset;
			
			default:
				/* Just ignore */
				TRACE_DEBUG(FULL, "Connection attempt failed but current state is %s, ignoring...", STATE_STR(cur_state));
		}
		
		goto psm_loop;
	}
	
	/* The timeout for the current state has been reached */
	if (event == FDEVP_PSM_TIMEOUT) {
		switch (cur_state) {
			case STATE_OPEN:
			case STATE_REOPEN:
			case STATE_OPEN_NEW:
				CHECK_FCT_DO( fd_p_dw_timeout(peer), goto psm_end );
				goto psm_loop;
			
			case STATE_CLOSED:
				CHECK_FCT_DO( fd_psm_change_state(peer, STATE_WAITCNXACK), goto psm_end );
				fd_psm_next_timeout(peer, 0, CNX_TIMEOUT);
				CHECK_FCT_DO( fd_p_cnx_init(peer), goto psm_end );
				goto psm_loop;
			
			case STATE_SUSPECT:
				/* Mark the connection problem */
				peer->p_flags.pf_cnx_pb = 1;
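				/* no break: fall through and restart the connection attempt like the states below */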
			case STATE_CLOSING:
			case STATE_WAITCNXACK:
			case STATE_WAITCEA:
				/* Destroy the connection, restart the timer to a new connection attempt */
				fd_psm_next_timeout(peer, 1, peer->p_hdr.info.config.pic_tctimer ?: fd_g_config->cnf_timer_tc);
				goto psm_reset;
			
			case STATE_CLOSING_GRACE:
				/* The grace period is completed, now close */
				if (peer->p_flags.pf_localterm)
					goto psm_end;
				
				fd_psm_cleanup(peer, 0);
				/* Reset the timer for next connection attempt */
				fd_psm_next_timeout(peer, 1, fd_p_dp_newdelay(peer));
				goto psm_loop;
			
			case STATE_WAITCNXACK_ELEC:
				/* Abort the initiating side */
				fd_p_cnx_abort(peer, 0);
				/* Process the receiver side */
				CHECK_FCT_DO( fd_p_ce_process_receiver(peer), goto psm_end );
				goto psm_loop;
			
			default:
				ASSERT(0); /* implementation problem, we did not foresee this case? */
		}
	}
	
	/* Default action : the handling has not yet been implemented. [for debug only] */
	TRACE_DEBUG(INFO, "Missing handler in PSM for '%s'\t<-- '%s'", STATE_STR(cur_state), fd_pev_str(event));
psm_reset:
	if (peer->p_flags.pf_delete)
		goto psm_end;
	fd_psm_cleanup(peer, 0);
	goto psm_loop;
	
psm_end:
	fd_psm_cleanup(peer, 1);
	TRACE_DEBUG(INFO, "'%s'\t-> STATE_ZOMBIE (terminated)\t'%s'",
			STATE_STR(fd_peer_getstate(peer)),
			peer->p_hdr.info.pi_diamid);
	pthread_cleanup_pop(1); /* set STATE_ZOMBIE */
	peer->p_psm = (pthread_t)NULL;
	pthread_detach(pthread_self());
	return NULL;
}


/************************************************************************/
/*                     Functions to control the PSM                     */
/************************************************************************/
/* Create the PSM thread of one peer structure */
int fd_psm_begin(struct fd_peer * peer )
{
	TRACE_ENTRY("%p", peer);
	
	/* Check the peer and state are OK */
	CHECK_PARAMS( fd_peer_getstate(peer) == STATE_NEW );
	
	/* Create the FIFO for events */
	CHECK_FCT( fd_fifo_new(&peer->p_events) );
	
	/* Create the PSM controller thread */
	CHECK_POSIX( pthread_create( &peer->p_psm, NULL, p_psm_th, peer ) );
	
	/* We're done */
	return 0;
}

/* End the PSM (clean ending) */
int fd_psm_terminate(struct fd_peer * peer, char * reason )
{
	TRACE_ENTRY("%p", peer);
	CHECK_PARAMS( CHECK_PEER(peer) );
	
	if (fd_peer_getstate(peer) != STATE_ZOMBIE) {
		CHECK_FCT( fd_event_send(peer->p_events, FDEVP_TERMINATE, 0, reason) );
	} else {
		TRACE_DEBUG(FULL, "Peer '%s' was already terminated", peer->p_hdr.info.pi_diamid);
	}
	return 0;
}
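
/* Example (sketch only): a typical clean shutdown request when the daemon terminates or
 * reboots, assuming 'peer' is a valid struct fd_peer pointer ("REBOOTING" being one of the
 * standard Disconnect-Cause values):
 *
 *	CHECK_FCT( fd_psm_terminate(peer, "REBOOTING") );
 *
 * If the peer is in an OPEN state, the PSM thread then sends a DPR with this cause before
 * closing; otherwise it simply cleans up and terminates the peer object.
 */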

/* End the PSM & cleanup the peer structure */
void fd_psm_abord(struct fd_peer * peer )
{
	TRACE_ENTRY("%p", peer);
	
	/* Cancel PSM thread */
	CHECK_FCT_DO( fd_thr_term(&peer->p_psm), /* continue */ );
	
	/* Cleanup the data */
	fd_psm_cleanup(peer, 1);
	
	/* Destroy the event list */
	CHECK_FCT_DO( fd_fifo_del(&peer->p_events), /* continue */ );
	
	/* Remaining cleanups are performed in fd_peer_free */
	return;
}