Changeset 1010:357c2f892d24 in freeDiameter
- Timestamp:
- Mar 26, 2013, 12:39:32 AM (11 years ago)
- Branch:
- default
- Phase:
- public
- Files:
-
- 7 edited
Legend:
- Unmodified
- Added
- Removed
-
include/freeDiameter/libfdcore.h
r1008 r1010 390 390 * PARAMETERS: 391 391 * peer : The peer which load to read 392 * to_receive : (out) number of requests sent to this peer without matching answer yet. 393 * to_send : (out) number of requests received from this peer and not yet answered. 392 394 * 393 395 * DESCRIPTION: … … 400 402 * !0 : An error occurred 401 403 */ 402 int fd_peer_get_load_pending(struct peer_hdr *peer, int * load);404 int fd_peer_get_load_pending(struct peer_hdr *peer, long * to_receive, long * to_send); 403 405 404 406 /* -
libfdcore/fdcore-internal.h
r938 r1010 71 71 #endif /* DPR_TIMEOUT */ 72 72 73 /* Delay during which the connection is kept open to allow exchanging remaining pending answers after DPR/DPA */ 74 #ifndef GRACE_TIMEOUT 75 #define GRACE_TIMEOUT 1 /* in seconds */ 76 #endif /* GRACE_TIMEOUT */ 77 73 78 /* The Vendor-Id to advertise in CER/CEA */ 74 79 #ifndef MY_VENDOR_ID … … 126 131 struct fd_list srs; /* requests ordered by hop-by-hop id */ 127 132 struct fd_list exp; /* requests that have a timeout set, ordered by timeout */ 128 intcnt; /* number of requests in the srs list */133 long cnt; /* number of requests in the srs list */ 129 134 pthread_mutex_t mtx; /* mutex to protect these lists */ 130 135 pthread_cond_t cnd; /* cond var used by the thread that handles timeouts */ … … 181 186 /* Sent requests (for fallback), list of struct sentreq ordered by hbh */ 182 187 struct sr_list p_sr; 188 189 /* Pending received requests not yet answered (count only) */ 190 long p_reqin_count; /* We use p_state_mtx to protect this value */ 183 191 184 192 /* Data for transitional states before the peer is in OPEN state */ -
libfdcore/p_ce.c
r975 r1010 919 919 } 920 920 921 /* Update the counter to match with the answer being sent */ 922 CHECK_POSIX( pthread_mutex_lock(&peer->p_state_mtx) ); 923 peer->p_reqin_count++; 924 CHECK_POSIX( pthread_mutex_unlock(&peer->p_state_mtx) ); 925 921 926 /* Reply a CEA */ 922 927 CHECK_FCT( fd_msg_new_answer_from_req ( fd_g_config->cnf_dict, &msg, 0 ) ); -
libfdcore/p_dp.c
r974 r1010 63 63 int fd_p_dp_handle(struct msg ** msg, int req, struct fd_peer * peer) 64 64 { 65 long to_receive, to_send; 65 66 TRACE_ENTRY("%p %d %p", msg, req, peer); 66 67 … … 110 111 CHECK_FCT( fd_msg_rescode_set( *msg, "DIAMETER_SUCCESS", NULL, NULL, 1 ) ); 111 112 112 /* Move to CLOSING state to failover outgoing messages (and avoid failing over the DPA...) */ 113 CHECK_FCT( fd_psm_change_state(peer, STATE_CLOSING) ); 114 115 /* Now send the DPA */ 116 CHECK_FCT( fd_out_send( msg, NULL, peer, FD_CNX_ORDERED) ); 117 118 if (fd_cnx_isMultichan(peer->p_cnxctx)) { 119 /* There is a possibililty that messages are still in the pipe coming here, so let's grace for 1 second */ 120 CHECK_FCT( fd_psm_change_state(peer, STATE_CLOSING_GRACE) ); 121 fd_psm_next_timeout(peer, 0, 1); 113 /* Do we have pending exchanges with this peer? */ 114 CHECK_FCT( fd_peer_get_load_pending(&peer->p_hdr, &to_receive, &to_send) ); 115 116 if ((to_receive == 0) && (to_send == 1 /* only the DPA */)) { 117 /* No pending exchange, move to CLOSING directly */ 118 CHECK_FCT( fd_psm_change_state(peer, STATE_CLOSING) ); 119 120 /* Now send the DPA */ 121 CHECK_FCT( fd_out_send( msg, NULL, peer, FD_CNX_ORDERED) ); 122 122 123 } else { 124 /* Move to CLOSED state */ 123 /* and move to CLOSED */ 125 124 fd_psm_cleanup(peer, 0); 126 125 127 126 /* Reset the timer for next connection attempt -- we'll retry sooner or later depending on the disconnection cause */ 128 127 fd_psm_next_timeout(peer, 1, fd_p_dp_newdelay(peer)); 128 } else { 129 /* We have pending exchanges, we move to CLOSING_GRACE which allows exchanges of answers but 130 not new requests */ 131 CHECK_FCT( fd_psm_change_state(peer, STATE_CLOSING_GRACE) ); 132 fd_psm_next_timeout(peer, 0, GRACE_TIMEOUT); 133 134 /* Now send the DPA */ 135 CHECK_FCT( fd_out_send( msg, NULL, peer, FD_CNX_ORDERED) ); 129 136 } 130 137 } else { 131 138 /* We received a DPA */ 132 139 int curstate = fd_peer_getstate(peer); 133 if (curstate != STATE_CLOSING ) 
{140 if (curstate != STATE_CLOSING_GRACE) { 134 141 TRACE_DEBUG(INFO, "Ignoring DPA received in state %s", STATE_STR(curstate)); 135 142 } … … 141 148 *msg = NULL; 142 149 143 if (fd_cnx_isMultichan(peer->p_cnxctx)) { 144 CHECK_FCT( fd_psm_change_state(peer, STATE_CLOSING_GRACE) ); 145 fd_psm_next_timeout(peer, 0, 1); 150 /* Do we still have pending exchanges with this peer? */ 151 CHECK_FCT( fd_peer_get_load_pending(&peer->p_hdr, &to_receive, &to_send) ); 152 if ((to_receive != 0) || (to_send != 0)) { 153 TRACE_DEBUG(INFO, "Received DPA but pending load: [%ld, %ld], giving grace delay before closing", to_receive, to_send); 154 fd_psm_next_timeout(peer, 0, GRACE_TIMEOUT); 146 155 peer->p_flags.pf_localterm = 1; 147 } 148 /* otherwise, return in CLOSING state, the psm will handle it */ 156 } else { 157 /* otherwise, go to CLOSING state, the psm will handle terminating the connection */ 158 CHECK_FCT( fd_psm_change_state(peer, STATE_CLOSING) ); 159 } 149 160 } 150 161 … … 188 199 189 200 /* Update the peer state and timer */ 190 CHECK_FCT( fd_psm_change_state(peer, STATE_CLOSING ) );201 CHECK_FCT( fd_psm_change_state(peer, STATE_CLOSING_GRACE) ); 191 202 fd_psm_next_timeout(peer, 0, DPR_TIMEOUT); 192 203 -
libfdcore/p_out.c
r975 r1010 173 173 int fd_out_send(struct msg ** msg, struct cnxctx * cnx, struct fd_peer * peer, uint32_t flags) 174 174 { 175 struct msg_hdr * hdr; 176 175 177 TRACE_ENTRY("%p %p %p %x", msg, cnx, peer, flags); 176 178 CHECK_PARAMS( msg && *msg && (cnx || (peer && peer->p_cnxctx))); 179 180 if (peer) { 181 CHECK_FCT( fd_msg_hdr(*msg, &hdr) ); 182 if (!(hdr->msg_flags & CMD_FLAG_REQUEST)) { 183 /* Update the count of pending answers to send */ 184 CHECK_POSIX( pthread_mutex_lock(&peer->p_state_mtx) ); 185 peer->p_reqin_count--; 186 CHECK_POSIX( pthread_mutex_unlock(&peer->p_state_mtx) ); 187 } 188 } 177 189 178 190 if (fd_peer_getstate(peer) == STATE_OPEN) { -
libfdcore/p_psm.c
r975 r1010 45 45 beginning and end of a connection lifetime. It means we need agility to 46 46 switch between "ordering enforced" and "ordering not enforced to counter 47 H otLB" modes of operation.48 49 The connection state machine represented in RFC3588 (and rfc3588bis) is47 Head of the Line Blocking" modes of operation. 48 49 The connection state machine represented in RFC3588 (and RFC6733) is 50 50 incomplete, because it lacks the SUSPECT state and the 3 DWR/DWA 51 51 exchanges (section 5.1) when the peer recovers from this state. … … 91 91 application message is lost. 92 92 93 This situation is actually quite possiblebecause DPR/DPA messages are93 This situation is actually happening easily because DPR/DPA messages are 94 94 very short, while application messages can be quite large. Therefore, 95 95 they require much more time to deliver. … … 97 97 I really cannot see a way to counter this effect by using the ordering 98 98 of the messages, except by applying a timer (state STATE_CLOSING_GRACE). 99 This timer can also be useful when we detect that some messages have not 100 yet received an answer on this link, to give time to the application to 101 complete the exchange ongoing. 
99 102 100 103 However, this problem must be balanced with the fact that the message … … 197 200 return 0; 198 201 } 199 static int leave_open_state(struct fd_peer * peer )202 static int leave_open_state(struct fd_peer * peer, int skip_failover) 200 203 { 201 204 /* Remove from active peers list */ … … 208 211 209 212 /* Failover the messages */ 210 fd_peer_failover_msg(peer); 213 if (!skip_failover) { 214 fd_peer_failover_msg(peer); 215 } 211 216 212 217 return 0; … … 288 293 289 294 if (old == STATE_OPEN) { 290 CHECK_FCT( leave_open_state(peer) ); 291 } 292 295 CHECK_FCT( leave_open_state(peer, new_state == STATE_CLOSING_GRACE) ); 296 } 297 if (old == STATE_CLOSING_GRACE) { 298 fd_peer_failover_msg(peer); 299 } 300 293 301 if (new_state == STATE_OPEN) { 294 302 CHECK_FCT( enter_open_state(peer) ); … … 298 306 /* Purge event list */ 299 307 fd_psm_events_free(peer); 308 309 /* Reset the counter of pending answers to send */ 310 peer->p_reqin_count = 0; 300 311 301 312 /* If the peer is not persistent, we destroy it */ … … 529 540 fd_msg_log( FD_MSG_LOG_TIMING, msg, "Answer received in %d.%06.6d sec.", delay.tv_sec, delay.tv_nsec / 1000 ); 530 541 } 542 } else { 543 /* Mark the incoming request so that we know we have pending answers for this peer */ 544 CHECK_POSIX_DO( pthread_mutex_lock(&peer->p_state_mtx), goto psm_end ); 545 peer->p_reqin_count++; 546 CHECK_POSIX_DO( pthread_mutex_unlock(&peer->p_state_mtx), goto psm_end ); 531 547 } 532 548 -
libfdcore/peers.c
r974 r1010 258 258 259 259 /* Return the value of srlist->cnt */ 260 int fd_peer_get_load_pending(struct peer_hdr *peer, int * load)260 int fd_peer_get_load_pending(struct peer_hdr *peer, long * to_receive, long * to_send) 261 261 { 262 262 struct fd_peer * p = (struct fd_peer *)peer; 263 TRACE_ENTRY("%p %p", peer, load); 264 CHECK_PARAMS(CHECK_PEER(peer) && load); 265 266 CHECK_POSIX( pthread_mutex_lock(&p->p_sr.mtx) ); 267 *load = p->p_sr.cnt; 268 CHECK_POSIX( pthread_mutex_unlock(&p->p_sr.mtx) ); 263 TRACE_ENTRY("%p %p %p", peer, to_receive, to_send); 264 CHECK_PARAMS(CHECK_PEER(peer)); 265 266 if (to_receive) { 267 CHECK_POSIX( pthread_mutex_lock(&p->p_sr.mtx) ); 268 *to_receive = p->p_sr.cnt; 269 CHECK_POSIX( pthread_mutex_unlock(&p->p_sr.mtx) ); 270 } 271 if (to_send) { 272 CHECK_POSIX( pthread_mutex_lock(&p->p_state_mtx) ); 273 *to_send = p->p_reqin_count; 274 CHECK_POSIX( pthread_mutex_unlock(&p->p_state_mtx) ); 275 } 269 276 270 277 return 0; … … 411 418 } 412 419 413 snprintf(buf, sizeof(buf), "> %s\t%s\t[% dsr]", STATE_STR(fd_peer_getstate(peer)), peer->p_hdr.info.pi_diamid, peer->p_sr.cnt);420 snprintf(buf, sizeof(buf), "> %s\t%s\t[%ldsr,%ldpa]", STATE_STR(fd_peer_getstate(peer)), peer->p_hdr.info.pi_diamid, peer->p_sr.cnt, peer->p_reqin_count); 414 421 if (details > INFO) { 415 422 snprintf(buf+strlen(buf), sizeof(buf)-strlen(buf), "\t(rlm:%s)", peer->p_hdr.info.runtime.pir_realm ?: "<unknown>");
Note: See TracChangeset
for help on using the changeset viewer.