File 0001-rfc6544.patch of Package pjproject
Copyright (C) 2018-2020 Savoir-faire Linux Inc.
ice: rfc6544 support
This patch is a proposed implementation of RFC 6544 in PJNATH.
It allows PJNATH to support TCP ICE candidates and to open a direct TCP
connection between peers.
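
As a rough sketch (not part of the patch itself), an application would opt
in through the new configuration fields introduced below; the field names
are the ones this patch adds:

    pj_ice_strans_cfg cfg;
    pj_ice_strans_cfg_default(&cfg);
    cfg.protocol = PJ_ICE_TP_TCP;        /* gather RFC 6544 TCP candidates */
    cfg.stun.conn_type = PJ_STUN_TP_TCP; /* reach the STUN server over TCP */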
+ BUG (partially fixed by this patch): if an active_sock is busy with a
pending packet and receives a new packet to send, the data actually sent
will be a mix of the pending packet and the new one.
To avoid this, pj_ice_strans_sendto2 is introduced.
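
A hedged usage sketch follows (the exact prototype is declared in
ice_strans.h; it is assumed here to mirror pj_ice_strans_sendto()):

    pj_status_t st = pj_ice_strans_sendto2(ice_st, comp_id, pkt, pkt_len,
                                           &dst_addr, dst_addr_len);
    if (st == PJ_EPENDING) {
        /* A previous send is still in flight; completion is reported
         * asynchronously instead of the two payloads being mixed. */
    }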
Written by
Sébastien Blin <sebastien.blin@savoirfairelinux.com>
on behalf of Savoir-faire Linux.
Rebased for pjsip 2.10 by Peymane Marandi
<paymon@savoirfairelinux.com>
on behalf of Savoir-faire Linux.
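
For reference, all TCP candidate pairs frame application data with the
two-byte length prefix from RFC 4571, as implemented in ice_strans.c below.
A minimal sketch of the framing step (buf, payload and len are illustrative
names, not identifiers from the patch):

    /* RFC 4571: 16-bit payload length in network byte order, then payload */
    buf[0] = (pj_uint8_t)(len >> 8);
    buf[1] = (pj_uint8_t)(len & 0xFF);
    pj_memcpy(&buf[2], payload, len);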
---
pjnath/include/pjnath/ice_session.h | 151 +++-
pjnath/include/pjnath/ice_strans.h | 21 +
pjnath/include/pjnath/stun_session.h | 75 +-
pjnath/include/pjnath/stun_sock.h | 67 +-
pjnath/include/pjnath/turn_sock.h | 11 +
pjnath/src/pjnath-test/concur_test.c | 5 +-
pjnath/src/pjnath-test/sess_auth.c | 14 +-
pjnath/src/pjnath-test/stun_sock_test.c | 7 +-
pjnath/src/pjnath/ice_session.c | 494 +++++++++--
pjnath/src/pjnath/ice_strans.c | 743 +++++++++++++---
pjnath/src/pjnath/nat_detect.c | 7 +-
pjnath/src/pjnath/stun_session.c | 15 +-
pjnath/src/pjnath/stun_sock.c | 1082 +++++++++++++++++++----
pjnath/src/pjnath/stun_transaction.c | 3 +
pjnath/src/pjnath/turn_session.c | 3 +-
pjnath/src/pjnath/turn_sock.c | 24 +-
pjnath/src/pjturn-client/client_main.c | 2 +-
pjnath/src/pjturn-srv/allocation.c | 3 +-
pjnath/src/pjturn-srv/server.c | 2 +-
pjsip-apps/src/samples/icedemo.c | 116 ++-
pjsip/src/pjsua-lib/pjsua_core.c | 2 +-
21 files changed, 2438 insertions(+), 409 deletions(-)
diff --git a/pjnath/include/pjnath/ice_session.h b/pjnath/include/pjnath/ice_session.h
index 8971220f0..4cccd7c64 100644
--- a/pjnath/include/pjnath/ice_session.h
+++ b/pjnath/include/pjnath/ice_session.h
@@ -163,6 +163,52 @@ typedef enum pj_ice_cand_type
} pj_ice_cand_type;
+/**
+ * ICE candidate transport types, as described by RFC 6544.
+ */
+typedef enum pj_ice_cand_transport {
+ /**
+ * UDP-compatible candidate.
+ */
+ PJ_CAND_UDP,
+ /**
+ * TCP candidate that opens outgoing connections (tcp-active).
+ */
+ PJ_CAND_TCP_ACTIVE,
+ /**
+ * TCP candidate that accepts incoming connections (tcp-passive).
+ */
+ PJ_CAND_TCP_PASSIVE,
+ /**
+ * TCP simultaneous-open candidate, able both to accept incoming
+ * connections and to open outgoing ones (tcp-so).
+ */
+ PJ_CAND_TCP_SO
+} pj_ice_cand_transport;
+
+/**
+ * ICE transport types, used to specify the connection type both for
+ * reaching candidates and for reaching the remote agent.
+ */
+typedef enum pj_ice_tp_type {
+ /**
+ * UDP transport, which value corresponds to IANA protocol number.
+ */
+ PJ_ICE_TP_UDP = 17,
+
+ /**
+ * TCP transport, which value corresponds to IANA protocol number.
+ */
+ PJ_ICE_TP_TCP = 6,
+
+ /**
+ * TLS transport. The TLS transport will only be used as the connection
+ * type to reach the server and never as the allocation transport type.
+ */
+ PJ_ICE_TP_TLS = 255
+
+} pj_ice_tp_type;
+
/** Forward declaration for pj_ice_sess */
typedef struct pj_ice_sess pj_ice_sess;
@@ -309,6 +355,11 @@ typedef struct pj_ice_sess_cand
*/
pj_sockaddr rel_addr;
+ /**
+ * Transport used (TCP or UDP)
+ */
+ pj_ice_cand_transport transport;
+
} pj_ice_sess_cand;
@@ -324,6 +375,22 @@ typedef enum pj_ice_sess_check_state
*/
PJ_ICE_SESS_CHECK_STATE_FROZEN,
+ /**
+ * This status is used when a packet sent via TURN gets a
+ * "Connection reset by peer". This means that the peer has not
+ * allowed us to connect yet. The socket will be reconnected during
+ * the next loop.
+ */
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY,
+
+ /**
+ * TODO (sblin): REMOVE THIS! - https://github.com/coturn/coturn/issues/408
+ * For now, this status is only used because sometimes the first packet
+ * does not get any response, so we resend the packet every 50 loops.
+ */
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
+
/**
* A check has not been performed for this pair, and can be
* performed as soon as it is the highest priority Waiting pair on
@@ -331,6 +398,12 @@ typedef enum pj_ice_sess_check_state
*/
PJ_ICE_SESS_CHECK_STATE_WAITING,
+ /**
+ * A check has not been performed for this pair yet, but the TCP
+ * socket is still connecting to the peer. Wait for the connection
+ * to complete.
+ */
+ PJ_ICE_SESS_CHECK_STATE_PENDING,
+
/**
* A check has not been performed for this pair, and can be
* performed as soon as it is the highest priority Waiting pair on
@@ -520,6 +593,41 @@ typedef struct pj_ice_sess_cb
void *pkt, pj_size_t size,
const pj_sockaddr_t *src_addr,
unsigned src_addr_len);
+
+ /**
+ * Wait for the TCP connection to be established, then send the
+ * connectivity check.
+ *
+ * @param ice The ICE session.
+ * @param clist The ICE check list.
+ * @param check_id Index of the check to perform.
+ */
+ pj_status_t (*wait_tcp_connection)(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id);
+
+ /**
+ * Reconnect a TCP connection that was reset and send the connectivity
+ * check. See PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY.
+ *
+ * @param ice The ICE session.
+ * @param clist The ICE check list.
+ * @param check_id Index of the check to perform.
+ */
+ pj_status_t (*reconnect_tcp_connection)(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id);
+
+ /**
+ * Close the TCP socket used by a check.
+ *
+ * @param ice The ICE session.
+ * @param clist The ICE check list.
+ * @param check_id Index of the check to close.
+ */
+ pj_status_t (*close_tcp_connection)(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id);
+
} pj_ice_sess_cb;
@@ -636,6 +744,7 @@ struct pj_ice_sess
pj_bool_t valid_pair_found; /**< First pair found */
pj_status_t ice_status; /**< Error status. */
pj_timer_entry timer; /**< ICE timer. */
+ pj_timer_entry timer_connect; /**< TCP connect timeout timer. */
pj_ice_sess_cb cb; /**< Callback. */
pj_stun_config stun_cfg; /**< STUN settings. */
@@ -855,6 +964,7 @@ PJ_DECL(pj_status_t) pj_ice_sess_set_prefs(pj_ice_sess *ice,
* @param rel_addr Optional related address.
* @param addr_len Length of addresses.
* @param p_cand_id Optional pointer to receive the candidate ID.
+ * @param transport Candidate's transport protocol.
*
* @return PJ_SUCCESS if candidate is successfully added.
*/
@@ -868,7 +978,8 @@ PJ_DECL(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
const pj_sockaddr_t *base_addr,
const pj_sockaddr_t *rel_addr,
int addr_len,
- unsigned *p_cand_id);
+ unsigned *p_cand_id,
+ pj_ice_cand_transport transport);
/**
* Find default candidate for the specified component ID, using this
@@ -980,6 +1091,44 @@ PJ_DECL(pj_status_t) pj_ice_sess_on_rx_pkt(pj_ice_sess *ice,
const pj_sockaddr_t *src_addr,
int src_addr_len);
+/**
+ * Notification when the ICE session gets a new incoming connection.
+ *
+ * @param ice The ICE session.
+ * @param transport_id Related transport.
+ * @param status PJ_SUCCESS when the connection is made, or the
+ * appropriate error code if the connection failed (or if the
+ * peer disconnected after an established connection).
+ * @param remote_addr Connected remote address.
+ */
+PJ_DECL(void) ice_sess_on_peer_connection(pj_ice_sess *ice,
+ pj_uint8_t transport_id,
+ pj_status_t status,
+ pj_sockaddr_t* remote_addr);
+
+/**
+ * Notification when a connection of the ICE session is reset.
+ * See PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY.
+ *
+ * @param ice The ICE session.
+ * @param transport_id Related transport.
+ * @param remote_addr Connected remote address.
+ */
+PJ_DECL(void) ice_sess_on_peer_reset_connection(pj_ice_sess *ice,
+ pj_uint8_t transport_id,
+ pj_sockaddr_t* remote_addr);
+
+/**
+ * Notification when the ICE session gets a new packet.
+ * Used to clear the PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET status.
+ *
+ * @param ice The ICE session.
+ * @param transport_id Related transport.
+ * @param remote_addr Connected remote address.
+ */
+PJ_DECL(void) ice_sess_on_peer_packet(pj_ice_sess *ice,
+ pj_uint8_t transport_id,
+ pj_sockaddr_t* remote_addr);
/**
diff --git a/pjnath/include/pjnath/ice_strans.h b/pjnath/include/pjnath/ice_strans.h
index d0af76679..9eb74b35f 100644
--- a/pjnath/include/pjnath/ice_strans.h
+++ b/pjnath/include/pjnath/ice_strans.h
@@ -274,6 +274,13 @@ typedef struct pj_ice_strans_stun_cfg
*/
pj_bool_t ignore_stun_error;
+ /**
+ * Type of connection to the STUN server.
+ *
+ * Default is PJ_STUN_TP_UDP.
+ */
+ pj_stun_tp_type conn_type;
+
} pj_ice_strans_stun_cfg;
@@ -289,6 +296,13 @@ typedef struct pj_ice_strans_turn_cfg
*/
int af;
+ /**
+ * Whether to use UDP or TCP, as described by RFC 6544. With TCP,
+ * candidates are discovered via TCP sockets, and messages on the
+ * transport are then carried over TCP.
+ */
+ pj_ice_tp_type protocol;
+
/**
* Optional TURN socket settings. The default values will be
* initialized by #pj_turn_sock_cfg_default(). This contains
@@ -368,6 +382,13 @@ typedef struct pj_ice_strans_cfg
*/
int af;
+ /**
+ * Whether to use UDP or TCP, as described by RFC 6544. With TCP,
+ * candidates are discovered via TCP sockets, and messages on the
+ * transport are then carried over TCP.
+ */
+ pj_ice_tp_type protocol;
+
/**
* STUN configuration which contains the timer heap and
* ioqueue instance to be used, and STUN retransmission
diff --git a/pjnath/include/pjnath/stun_session.h b/pjnath/include/pjnath/stun_session.h
index bee630ab4..3f2ecf739 100644
--- a/pjnath/include/pjnath/stun_session.h
+++ b/pjnath/include/pjnath/stun_session.h
@@ -174,6 +174,29 @@ typedef struct pj_stun_rx_data pj_stun_rx_data;
/** Forward declaration for pj_stun_session */
typedef struct pj_stun_session pj_stun_session;
+/**
+ * STUN transport types, which will be used both to specify the connection
+ * type for reaching STUN server and the type of allocation transport to be
+ * requested to server (the REQUESTED-TRANSPORT attribute).
+ */
+typedef enum pj_stun_tp_type {
+ /**
+ * UDP transport, which value corresponds to IANA protocol number.
+ */
+ PJ_STUN_TP_UDP = 17,
+
+ /**
+ * TCP transport, which value corresponds to IANA protocol number.
+ */
+ PJ_STUN_TP_TCP = 6,
+
+ /**
+ * TLS transport. The TLS transport will only be used as the connection
+ * type to reach the server and never as the allocation transport type.
+ */
+ PJ_STUN_TP_TLS = 255
+
+} pj_stun_tp_type;
/**
* This is the callback to be registered to pj_stun_session, to send
@@ -307,6 +330,38 @@ typedef struct pj_stun_session_cb
const pj_sockaddr_t *src_addr,
unsigned src_addr_len);
+ /**
+ * Notification when the STUN session gets a ConnectionAttempt indication.
+ *
+ * @param stun_session The STUN session.
+ * @param status PJ_SUCCESS when the connection is made, or the
+ * appropriate error code if the connection failed (or if the
+ * peer disconnected after an established connection).
+ * @param remote_addr The remote address that connected.
+ */
+ void (*on_peer_connection)(pj_stun_session *sess,
+ pj_status_t status,
+ pj_sockaddr_t* remote_addr);
+
+ /**
+ * Notification when the STUN connection is reset (TCP only).
+ *
+ * @param stun_session The STUN session.
+ * @param remote_addr The remote address that was reset.
+ */
+ void (*on_peer_reset_connection)(pj_stun_session *sess,
+ pj_sockaddr_t* remote_addr);
+
+ /**
+ * Notification when the STUN session receives a packet from a peer
+ * (TCP only).
+ *
+ * @param stun_session The STUN session.
+ * @param remote_addr The remote address the packet came from.
+ */
+ void (*on_peer_packet)(pj_stun_session *sess,
+ pj_sockaddr_t* remote_addr);
+
} pj_stun_session_cb;
@@ -390,6 +445,7 @@ typedef enum pj_stun_sess_msg_log_flag
* @param grp_lock Optional group lock to be used by this session.
* If NULL, the session will create one itself.
* @param p_sess Pointer to receive STUN session instance.
+ * @param conn_type Whether the session uses UDP or TCP.
*
* @return PJ_SUCCESS on success, or the appropriate error code.
*/
@@ -398,7 +454,8 @@ PJ_DECL(pj_status_t) pj_stun_session_create(pj_stun_config *cfg,
const pj_stun_session_cb *cb,
pj_bool_t fingerprint,
pj_grp_lock_t *grp_lock,
- pj_stun_session **p_sess);
+ pj_stun_session **p_sess,
+ pj_stun_tp_type conn_type);
/**
* Destroy the STUN session and all objects created in the context of
@@ -752,6 +809,22 @@ PJ_DECL(pj_status_t) pj_stun_session_on_rx_pkt(pj_stun_session *sess,
PJ_DECL(void) pj_stun_msg_destroy_tdata(pj_stun_session *sess,
pj_stun_tx_data *tdata);
+/**
+ * Get the callbacks registered to the STUN session.
+ *
+ * @param sess The STUN session.
+ *
+ * @return The callbacks linked to the STUN session.
+ */
+PJ_DECL(pj_stun_session_cb *) pj_stun_session_callback(pj_stun_session *sess);
+
+/**
+ * Get the connection type of the STUN session.
+ *
+ * @param sess The STUN session.
+ *
+ * @return The connection type linked to the STUN session.
+ */
+PJ_DECL(pj_stun_tp_type) pj_stun_session_tp_type(pj_stun_session *sess);
+
/**
* @}
diff --git a/pjnath/include/pjnath/stun_sock.h b/pjnath/include/pjnath/stun_sock.h
index fff4df885..bfc9c1415 100644
--- a/pjnath/include/pjnath/stun_sock.h
+++ b/pjnath/include/pjnath/stun_sock.h
@@ -24,10 +24,14 @@
* @file stun_sock.h
* @brief STUN aware socket transport
*/
+#include <pj/activesock.h>
#include <pjnath/stun_config.h>
+#include <pjnath/stun_session.h>
#include <pjlib-util/resolver.h>
+#include <pjlib-util/srv_resolver.h>
#include <pj/ioqueue.h>
#include <pj/lock.h>
+#include <pj/pool.h>
#include <pj/sock.h>
#include <pj/sock_qos.h>
@@ -87,7 +91,17 @@ typedef enum pj_stun_sock_op
/**
* IP address change notification from the keep-alive operation.
*/
- PJ_STUN_SOCK_MAPPED_ADDR_CHANGE
+ PJ_STUN_SOCK_MAPPED_ADDR_CHANGE,
+
+ /**
+ * STUN session was destroyed.
+ */
+ PJ_STUN_SESS_DESTROYED,
+
+ /**
+ * TCP failed to connect.
+ */
+ PJ_STUN_TCP_CONNECT_ERROR
} pj_stun_sock_op;
@@ -197,6 +211,11 @@ typedef struct pj_stun_sock_info
*/
pj_sockaddr mapped_addr;
+ /**
+ * If connected, the remote address will be stored here.
+ */
+ pj_sockaddr outgoing_addr;
+
/**
* Number of interface address aliases. The interface address aliases
* are list of all interface addresses in this host.
@@ -208,6 +227,11 @@ typedef struct pj_stun_sock_info
*/
pj_sockaddr aliases[PJ_ICE_ST_MAX_CAND];
+ /**
+ * The tranport type of the socket
+ */
+ pj_stun_tp_type conn_type;
+
} pj_stun_sock_info;
@@ -343,6 +367,9 @@ PJ_DECL(void) pj_stun_sock_cfg_default(pj_stun_sock_cfg *cfg);
* the operation of this transport.
* @param af Address family of socket. Currently pj_AF_INET()
* and pj_AF_INET6() are supported.
+ * @param conn_type Connection type to the STUN server. Both TCP and UDP are
+ * supported.
+ *
* @param name Optional name to be given to this transport to
* assist debugging.
* @param cb Callback to receive events/data from the transport.
@@ -357,6 +384,7 @@ PJ_DECL(void) pj_stun_sock_cfg_default(pj_stun_sock_cfg *cfg);
PJ_DECL(pj_status_t) pj_stun_sock_create(pj_stun_config *stun_cfg,
const char *name,
int af,
+ pj_stun_tp_type conn_type,
const pj_stun_sock_cb *cb,
const pj_stun_sock_cfg *cfg,
void *user_data,
@@ -485,6 +513,43 @@ PJ_DECL(pj_status_t) pj_stun_sock_sendto(pj_stun_sock *stun_sock,
const pj_sockaddr_t *dst_addr,
unsigned addr_len);
+
+#if PJ_HAS_TCP
+/**
+ * Connect the active socket to a remote address.
+ * @param stun_sock The STUN socket.
+ * @param remote_addr The destination address.
+ * @param af The address family.
+ */
+PJ_DECL(pj_status_t) pj_stun_sock_connect_active(pj_stun_sock *stun_sock,
+ const pj_sockaddr_t *remote_addr,
+ int af);
+
+/**
+ * Reconnect the active socket to a remote address after a reset.
+ * @param stun_sock The STUN socket.
+ * @param remote_addr The destination address.
+ * @param af The address family.
+ */
+PJ_DECL(pj_status_t) pj_stun_sock_reconnect_active(pj_stun_sock *stun_sock,
+ const pj_sockaddr_t *remote_addr,
+ int af);
+
+/**
+ * Close the active socket linked to a remote address.
+ * @param stun_sock The STUN socket.
+ * @param remote_addr The linked remote address.
+ */
+PJ_DECL(pj_status_t) pj_stun_sock_close(pj_stun_sock *stun_sock,
+ const pj_sockaddr_t *remote_addr);
+
+#endif
+
+/**
+ * Retrieve the STUN session linked to this socket.
+ * @param stun_sock The STUN socket.
+ */
+PJ_DECL(pj_stun_session *) pj_stun_sock_get_session(pj_stun_sock *stun_sock);
/**
* @}
*/
diff --git a/pjnath/include/pjnath/turn_sock.h b/pjnath/include/pjnath/turn_sock.h
index e4d306174..35388809f 100644
--- a/pjnath/include/pjnath/turn_sock.h
+++ b/pjnath/include/pjnath/turn_sock.h
@@ -623,6 +623,17 @@ PJ_DECL(pj_status_t) pj_turn_sock_bind_channel(pj_turn_sock *turn_sock,
const pj_sockaddr_t *peer,
unsigned addr_len);
+/**
+ * Check whether a TURN data connection exists for the given peer.
+ *
+ * @param turn_sock The TURN socket.
+ * @param peer The peer address to check.
+ *
+ * @return PJ_TRUE if the peer has a data connection, PJ_FALSE otherwise.
+ */
+PJ_DECL(pj_bool_t) pj_turn_sock_has_dataconn(pj_turn_sock *turn_sock,
+ const pj_sockaddr_t *peer);
+
/**
* @}
diff --git a/pjnath/src/pjnath-test/concur_test.c b/pjnath/src/pjnath-test/concur_test.c
index c3013d2ab..7777c637c 100644
--- a/pjnath/src/pjnath-test/concur_test.c
+++ b/pjnath/src/pjnath-test/concur_test.c
@@ -184,8 +184,9 @@ static int stun_destroy_test_session(struct stun_test_session *test_sess)
char name[10];
sprintf(name, "stun%02d", i);
status = pj_stun_sock_create(&test_sess->stun_cfg, name, pj_AF_INET(),
- &stun_cb, NULL, test_sess,
- &stun_sock[i]);
+ PJ_STUN_TP_UDP,
+ &stun_cb, NULL, test_sess,
+ &stun_sock[i]);
if (status != PJ_SUCCESS) {
PJ_PERROR(1,(THIS_FILE, status, "Error creating stun socket"));
return -10;
diff --git a/pjnath/src/pjnath-test/sess_auth.c b/pjnath/src/pjnath-test/sess_auth.c
index 055eaad61..1d07bf299 100644
--- a/pjnath/src/pjnath-test/sess_auth.c
+++ b/pjnath/src/pjnath-test/sess_auth.c
@@ -248,7 +248,8 @@ static int create_std_server(pj_stun_auth_type auth_type,
pj_bzero(&sess_cb, sizeof(sess_cb));
sess_cb.on_rx_request = &server_on_rx_request;
sess_cb.on_send_msg = &server_send_msg;
- status = pj_stun_session_create(&stun_cfg, "server", &sess_cb, PJ_FALSE, NULL, &server->sess);
+ status = pj_stun_session_create(&stun_cfg, "server", &sess_cb, PJ_FALSE,
+ NULL, &server->sess, PJ_STUN_TP_UDP);
if (status != PJ_SUCCESS) {
destroy_server();
return -10;
@@ -489,7 +490,8 @@ static int run_client_test(const char *title,
pj_bzero(&sess_cb, sizeof(sess_cb));
sess_cb.on_request_complete = &client_on_request_complete;
sess_cb.on_send_msg = &client_send_msg;
- status = pj_stun_session_create(&stun_cfg, "client", &sess_cb, PJ_FALSE, NULL, &client->sess);
+ status = pj_stun_session_create(&stun_cfg, "client", &sess_cb, PJ_FALSE,
+ NULL, &client->sess, PJ_STUN_TP_UDP);
if (status != PJ_SUCCESS) {
destroy_client_server();
return -200;
@@ -575,8 +577,12 @@ static int run_client_test(const char *title,
}
/* Send the request */
- status = pj_stun_session_send_msg(client->sess, NULL, PJ_FALSE, PJ_TRUE, &server->addr,
- pj_sockaddr_get_len(&server->addr), tdata);
+ status = pj_stun_session_send_msg(client->sess, NULL, PJ_FALSE,
+ (pj_stun_session_tp_type(client->sess) ==
+ PJ_STUN_TP_UDP),
+ &server->addr,
+ pj_sockaddr_get_len(&server->addr),
+ tdata);
if (status != PJ_SUCCESS) {
destroy_client_server();
return -270;
diff --git a/pjnath/src/pjnath-test/stun_sock_test.c b/pjnath/src/pjnath-test/stun_sock_test.c
index fff4fad26..e7f8b84eb 100644
--- a/pjnath/src/pjnath-test/stun_sock_test.c
+++ b/pjnath/src/pjnath-test/stun_sock_test.c
@@ -255,8 +255,9 @@ static pj_status_t create_client(pj_stun_config *cfg,
pj_bzero(&cb, sizeof(cb));
cb.on_status = &stun_sock_on_status;
cb.on_rx_data = &stun_sock_on_rx_data;
- status = pj_stun_sock_create(cfg, NULL, GET_AF(use_ipv6), &cb, &sock_cfg,
- client, &client->sock);
+ status = pj_stun_sock_create(cfg, NULL, GET_AF(use_ipv6),
+ PJ_STUN_TP_UDP,
+ &cb, &sock_cfg, client, &client->sock);
if (status != PJ_SUCCESS) {
app_perror(" pj_stun_sock_create()", status);
pj_pool_release(pool);
@@ -585,7 +586,7 @@ static int keep_alive_test(pj_stun_config *cfg, pj_bool_t use_ipv6)
PJ_LOG(3,(THIS_FILE, " sending to %s", pj_sockaddr_print(&info.srv_addr, txt, sizeof(txt), 3)));
}
status = pj_stun_sock_sendto(client->sock, NULL, &ret, sizeof(ret),
- 0, &info.srv_addr,
+ 0, &info.srv_addr,
pj_sockaddr_get_len(&info.srv_addr));
if (status != PJ_SUCCESS && status != PJ_EPENDING) {
app_perror(" error: server sending data", status);
diff --git a/pjnath/src/pjnath/ice_session.c b/pjnath/src/pjnath/ice_session.c
index 2a4125bc5..9936347a9 100644
--- a/pjnath/src/pjnath/ice_session.c
+++ b/pjnath/src/pjnath/ice_session.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <pjnath/ice_session.h>
+#include <pjnath/stun_session.h>
#include <pj/addr_resolv.h>
#include <pj/array.h>
#include <pj/assert.h>
@@ -44,7 +45,10 @@ static const char *cand_type_names[] =
static const char *check_state_name[] =
{
"Frozen",
+ "Needs Retry",
+ "Needs First Packet",
"Waiting",
+ "Pending",
"In Progress",
"Succeeded",
"Failed"
@@ -75,7 +79,8 @@ enum timer_type
valid check for every components. */
TIMER_START_NOMINATED_CHECK,/**< Controlling agent start connectivity
checks with USE-CANDIDATE flag. */
- TIMER_KEEP_ALIVE /**< ICE keep-alive timer. */
+ TIMER_KEEP_ALIVE, /**< ICE keep-alive timer. */
+ TIMER_CONNECTION_TIMEOUT
};
@@ -123,6 +128,8 @@ typedef struct timer_data
{
pj_ice_sess *ice;
pj_ice_sess_checklist *clist;
+ /* TODO (remove): for now, needed for the NEEDS_FIRST_PACKET state */
+ unsigned first_packet_counter;
} timer_data;
@@ -133,6 +140,7 @@ typedef struct timer_data
/* Forward declarations */
static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te);
+static void on_tcp_connect_timeout(pj_ice_sess *ice);
static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now);
static void ice_on_destroy(void *obj);
@@ -291,7 +299,8 @@ static pj_status_t init_comp(pj_ice_sess *ice,
status = pj_stun_session_create(&ice->stun_cfg, NULL,
&sess_cb, PJ_TRUE,
ice->grp_lock,
- &comp->stun_sess);
+ &comp->stun_sess,
+ PJ_STUN_TP_UDP);
if (status != PJ_SUCCESS)
return status;
@@ -359,6 +368,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_create(pj_stun_config *stun_cfg,
pj_ice_sess_options_default(&ice->opt);
pj_timer_entry_init(&ice->timer, TIMER_NONE, (void*)ice, &on_timer);
+ pj_timer_entry_init(&ice->timer_connect, TIMER_NONE, (void*)ice, &on_timer);
pj_ansi_snprintf(ice->obj_name, sizeof(ice->obj_name),
name, ice);
@@ -717,7 +727,8 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
const pj_sockaddr_t *base_addr,
const pj_sockaddr_t *rel_addr,
int addr_len,
- unsigned *p_cand_id)
+ unsigned *p_cand_id,
+ pj_ice_cand_transport transport)
{
pj_ice_sess_cand *lcand;
pj_status_t status = PJ_SUCCESS;
@@ -740,6 +751,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
lcand->comp_id = (pj_uint8_t)comp_id;
lcand->transport_id = (pj_uint8_t)transport_id;
lcand->type = type;
+ lcand->transport = transport;
pj_strdup(ice->pool, &lcand->foundation, foundation);
lcand->prio = CALC_CAND_PRIO(ice, type, local_pref, lcand->comp_id);
pj_sockaddr_cp(&lcand->addr, addr);
@@ -961,7 +973,8 @@ static void check_set_state(pj_ice_sess *ice, pj_ice_sess_check *check,
pj_ice_sess_check_state st,
pj_status_t err_code)
{
- pj_assert(check->state < PJ_ICE_SESS_CHECK_STATE_SUCCEEDED);
+ if (check->state >= PJ_ICE_SESS_CHECK_STATE_SUCCEEDED)
+ return;
LOG5((ice->obj_name, "Check %s: state changed from %s to %s",
dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), &ice->clist, check),
@@ -1081,6 +1094,17 @@ static pj_status_t prune_checklist(pj_ice_sess *ice,
return PJNATH_EICENOHOSTCAND;
}
}
+
+ /* Section 6.2, RFC 6544 (https://tools.ietf.org/html/rfc6544)
+ * When the agent prunes the check list, it MUST also remove any pair
+ * for which the local candidate is a passive TCP candidate
+ */
+ if (clist->checks[i].lcand->transport == PJ_CAND_TCP_PASSIVE) {
+ pj_array_erase(clist->checks, sizeof(clist->checks[0]),
+ clist->count, i);
+ --clist->count;
+ --i;
+ }
}
/* Next remove a pair if its local and remote candidates are identical
@@ -1183,6 +1207,9 @@ static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te)
case TIMER_KEEP_ALIVE:
ice_keep_alive(ice, PJ_TRUE);
break;
+ case TIMER_CONNECTION_TIMEOUT:
+ on_tcp_connect_timeout(ice);
+ break;
case TIMER_NONE:
/* Nothing to do, just to get rid of gcc warning */
break;
@@ -1619,6 +1646,43 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice,
return PJ_FALSE;
}
+static void on_tcp_connect_timeout(pj_ice_sess* ice)
+{
+ pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap,&ice->timer_connect,
+ TIMER_NONE);
+
+ pj_bool_t first_found = PJ_FALSE, set_timer = PJ_FALSE;
+
+ for (int i = 0; i<ice->clist.count && !set_timer; ++i) {
+ pj_ice_sess_check *check = &ice->clist.checks[i];
+ if (check->state == PJ_ICE_SESS_CHECK_STATE_PENDING) {
+ if (first_found) {
+ set_timer = PJ_TRUE;
+ } else {
+ first_found = PJ_TRUE;
+ if (*ice->cb.close_tcp_connection)
+ (*ice->cb.close_tcp_connection)(ice, &ice->clist, i);
+
+ check_set_state(ice, check,
+ PJ_ICE_SESS_CHECK_STATE_FAILED, PJ_ECANCELLED);
+ on_check_complete(ice, check);
+ }
+ }
+ }
+
+ if (set_timer && ice->timer_connect.id == TIMER_NONE) {
+ /* Reschedule */
+ pj_time_val delay = {
+ .sec = 0,
+ .msec = 1500
+ };
+ pj_time_val_normalize(&delay);
+ pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
+ &ice->timer_connect, &delay,
+ TIMER_CONNECTION_TIMEOUT,
+ ice->grp_lock);
+ }
+}
/* Create checklist by pairing local candidates with remote candidates */
PJ_DEF(pj_status_t) pj_ice_sess_create_check_list(
@@ -1705,6 +1769,30 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list(
continue;
}
+ /* Section 6.2, RFC 6544 (https://tools.ietf.org/html/rfc6544)
+ * As with UDP, check lists are formed only by full ICE implementations.
+ * When forming candidate pairs, the following types of TCP candidates
+ * can be paired with each other:
+ *
+ * Local Remote
+ * Candidate Candidate
+ * ---------------------------
+ * tcp-so tcp-so
+ * tcp-active tcp-passive
+ * tcp-passive tcp-active
+ */
+ if ((lcand->transport == PJ_CAND_UDP &&
+ rcand->transport != PJ_CAND_UDP) ||
+ (lcand->transport == PJ_CAND_TCP_PASSIVE &&
+ rcand->transport != PJ_CAND_TCP_ACTIVE) ||
+ (lcand->transport == PJ_CAND_TCP_ACTIVE &&
+ rcand->transport != PJ_CAND_TCP_PASSIVE) ||
+ (lcand->transport == PJ_CAND_TCP_SO &&
+ rcand->transport != PJ_CAND_TCP_SO))
+ {
+ continue;
+ }
+
chk->lcand = lcand;
chk->rcand = rcand;
@@ -1749,6 +1837,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list(
td = PJ_POOL_ZALLOC_T(ice->pool, timer_data);
td->ice = ice;
td->clist = clist;
+ td->first_packet_counter = 1;
clist->timer.user_data = (void*)td;
clist->timer.cb = &periodic_timer;
@@ -1761,6 +1850,36 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list(
return PJ_SUCCESS;
}
+static pj_status_t send_connectivity_check(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id,
+ pj_bool_t nominate,
+ pj_ice_msg_data *msg_data)
+{
+ pj_ice_sess_check *check;
+ const pj_ice_sess_cand *lcand;
+ const pj_ice_sess_cand *rcand;
+ pj_ice_sess_comp *comp;
+
+ check = &clist->checks[check_id];
+ lcand = check->lcand;
+ rcand = check->rcand;
+ comp = find_comp(ice, lcand->comp_id);
+
+ /* Note that USERNAME and MESSAGE-INTEGRITY will be added by the
+ * STUN session.
+ */
+
+ /* Initiate STUN transaction to send the request */
+
+ return pj_stun_session_send_msg(comp->stun_sess, msg_data, PJ_FALSE,
+ pj_stun_session_tp_type(comp->stun_sess)==
+ PJ_STUN_TP_UDP,
+ &rcand->addr,
+ pj_sockaddr_get_len(&rcand->addr),
+ check->tdata);
+}
+
/* Perform check on the specified candidate pair. */
static pj_status_t perform_check(pj_ice_sess *ice,
pj_ice_sess_checklist *clist,
@@ -1771,19 +1890,17 @@ static pj_status_t perform_check(pj_ice_sess *ice,
pj_ice_msg_data *msg_data;
pj_ice_sess_check *check;
const pj_ice_sess_cand *lcand;
- const pj_ice_sess_cand *rcand;
pj_uint32_t prio;
pj_status_t status;
check = &clist->checks[check_id];
lcand = check->lcand;
- rcand = check->rcand;
comp = find_comp(ice, lcand->comp_id);
+ pj_log_push_indent();
LOG5((ice->obj_name,
"Sending connectivity check for check %s",
dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), clist, check)));
- pj_log_push_indent();
/* Create request */
status = pj_stun_session_create_req(comp->stun_sess,
@@ -1831,32 +1948,71 @@ static pj_status_t perform_check(pj_ice_sess *ice,
&ice->tie_breaker);
} else {
+ if (nominate) {
+ check->nominated = PJ_TRUE;
+ }
pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
PJ_STUN_ATTR_ICE_CONTROLLED,
&ice->tie_breaker);
}
+#if PJ_HAS_TCP
+ switch (lcand->transport) {
+ case PJ_CAND_TCP_ACTIVE:
+ switch (check->state) {
+ case PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY:
+ status = (*ice->cb.reconnect_tcp_connection)(ice, clist, check_id);
+ break;
+ case PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET:
+ status = send_connectivity_check(ice, clist, check_id,
+ nominate, msg_data);
+ break;
+ default:
+ pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap,
+ &ice->timer_connect, TIMER_NONE);
+ status = (*ice->cb.wait_tcp_connection)(ice, clist, check_id);
+ if (ice->timer_connect.id != TIMER_NONE) {
+ pj_assert(!"Not expected any timer active");
+ } else {
+ pj_time_val delay = {
+ .sec = 0,
+ .msec = 1500,
+ };
+ pj_time_val_normalize(&delay);
+ pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
+ &ice->timer_connect, &delay,
+ TIMER_CONNECTION_TIMEOUT,
+ ice->grp_lock);
+ }
+ break;
+ }
+ break;
+ case PJ_CAND_TCP_PASSIVE:
+ case PJ_CAND_TCP_SO:
+ case PJ_CAND_UDP:
+ default:
+ status = send_connectivity_check(ice, clist, check_id, nominate, msg_data);
+ break;
+ }
+#else
+ status = send_connectivity_check(ice, clist, check_id, nominate, msg_data);
+#endif
- /* Note that USERNAME and MESSAGE-INTEGRITY will be added by the
- * STUN session.
- */
-
- /* Initiate STUN transaction to send the request */
- status = pj_stun_session_send_msg(comp->stun_sess, msg_data, PJ_FALSE,
- PJ_TRUE, &rcand->addr,
- pj_sockaddr_get_len(&rcand->addr),
- check->tdata);
- if (status != PJ_SUCCESS) {
+ if (status == PJ_SUCCESS) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS,
+ status);
+ } else if (status == PJ_EPENDING) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_PENDING, status);
+ } else if (check->rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
+ /* TODO (sblin) remove this - https://github.com/coturn/coturn/issues/408 */
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
+ status);
+ } else {
check->tdata = NULL;
pjnath_perror(ice->obj_name, "Error sending STUN request", status);
- pj_log_pop_indent();
- return status;
}
-
- check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS,
- PJ_SUCCESS);
pj_log_pop_indent();
- return PJ_SUCCESS;
+ return status;
}
@@ -1893,39 +2049,95 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th,
pj_log_push_indent();
/* Send STUN Binding request for check with highest priority on
- * Waiting state.
+ * Retry state.
*/
- for (i=0; i<clist->count; ++i) {
- pj_ice_sess_check *check = &clist->checks[i];
- if (check->state == PJ_ICE_SESS_CHECK_STATE_WAITING) {
- status = perform_check(ice, clist, i, ice->is_nominating);
- if (status != PJ_SUCCESS) {
- check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
- status);
- on_check_complete(ice, check);
+ if (start_count == 0) {
+ for (i = 0; i < clist->count; ++i) {
+ pj_ice_sess_check *check = &clist->checks[i];
+ // Reconnect closed TURN sockets
+ if (check->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY) {
+ status = perform_check(ice, clist, i, ice->is_nominating);
+ if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
+ status);
+ on_check_complete(ice, check);
+ }
+ ++start_count;
+ break;
}
+ }
+ }
- ++start_count;
- break;
+ if (start_count == 0) {
+ // TODO (sblin) remove - https://github.com/coturn/coturn/issues/408
+ pj_bool_t inc_counter = PJ_TRUE;
+ for (i = 0; i < clist->count; ++i) {
+ pj_ice_sess_check *check = &clist->checks[i];
+ if (check->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET) {
+ if (inc_counter) {
+ td->first_packet_counter += 1;
+ inc_counter = PJ_FALSE;
+ }
+ if (td->first_packet_counter % 50 == 0) {
+ status = perform_check(ice, clist, i, ice->is_nominating);
+ if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
+ status);
+ on_check_complete(ice, check);
+ }
+ }
+ ++start_count;
+ break;
+ }
+ }
+ }
+
+ /* Send STUN Binding request for check with highest priority on
+ * Waiting state.
+ */
+
+ if (start_count == 0) {
+ for (i = 0; i < clist->count; ++i) {
+ pj_ice_sess_check *check = &clist->checks[i];
+
+ if (check->state == PJ_ICE_SESS_CHECK_STATE_WAITING) {
+ status = perform_check(ice, clist, i, ice->is_nominating);
+ if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
+ status);
+ on_check_complete(ice, check);
+ }
+ ++start_count;
+ break;
+ }
}
}
/* If we don't have anything in Waiting state, perform check to
* highest priority pair that is in Frozen state.
*/
- if (start_count==0) {
- for (i=0; i<clist->count; ++i) {
+ if (start_count == 0) {
+ for (i = 0; i < clist->count; ++i) {
pj_ice_sess_check *check = &clist->checks[i];
if (check->state == PJ_ICE_SESS_CHECK_STATE_FROZEN) {
status = perform_check(ice, clist, i, ice->is_nominating);
- if (status != PJ_SUCCESS) {
- check_set_state(ice, check,
- PJ_ICE_SESS_CHECK_STATE_FAILED, status);
+ if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
on_check_complete(ice, check);
}
+ ++start_count;
+ break;
+ }
+ }
+ }
+ if (start_count == 0) {
+ // If all sockets are pending, do nothing
+ for (i = 0; i < clist->count; ++i) {
+ pj_ice_sess_check *check = &clist->checks[i];
+ if (check->state == PJ_ICE_SESS_CHECK_STATE_PENDING) {
++start_count;
break;
}
@@ -1933,14 +2145,14 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th,
}
/* Cannot start check because there's no suitable candidate pair.
- */
+ */
if (start_count!=0) {
/* Schedule for next timer */
pj_time_val timeout = {0, PJ_ICE_TA_VAL};
pj_time_val_normalize(&timeout);
pj_timer_heap_schedule_w_grp_lock(th, te, &timeout, PJ_TRUE,
- ice->grp_lock);
+ ice->grp_lock);
}
pj_grp_lock_release(ice->grp_lock);
@@ -2181,6 +2393,182 @@ static pj_status_t on_stun_send_msg(pj_stun_session *sess,
return status;
}
+static pj_ice_sess_check* get_current_check_at_state(pj_ice_sess *ice,
+ pj_sockaddr_t *remote_addr,
+ pj_ice_sess_check_state state,
+ int *current_check)
+{
+ if (!ice || !remote_addr)
+ return NULL;
+ // NOTE: multiple checks can have the same remote address; we only
+ // handle the first one found.
+ // First, check if the TCP is really connected. If not, abort
+ pj_ice_sess_check *check = NULL;
+ for (int i = 0; i < ice->clist.count; ++i) {
+ // Find related check
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+ /* Match the remote address against this check's remote candidate */
+ if (pj_sockaddr_cmp(remote_addr, &c->rcand->addr) == 0) {
+ if (c->tdata == NULL || c->state != state)
+ continue;
+ /* Match */
+ check = c;
+ if (current_check) *current_check = i;
+ break;
+ }
+ }
+ return check;
+}
+
+void ice_sess_on_peer_connection(pj_ice_sess *ice,
+ pj_uint8_t transport_id,
+ pj_status_t status,
+ pj_sockaddr_t* remote_addr)
+{
+ // The TCP link is now ready. We can now send the first STUN message (the
+ // connectivity check). This should trigger on_stun_request_complete when
+ // it finishes.
+ if (!remote_addr)
+ return;
+
+ int current_check = -1;
+ pj_ice_sess_check *check = get_current_check_at_state(ice,remote_addr,
+ PJ_ICE_SESS_CHECK_STATE_PENDING,
+ ¤t_check);
+ if (!check) {
+ // Handle peer reflexive candidates (incoming are still waiting here)
+ check = get_current_check_at_state(ice, remote_addr,
+ PJ_ICE_SESS_CHECK_STATE_WAITING,
+ ¤t_check);
+ if (!check) {
+ return;
+ }
+ }
+
+ const pj_ice_sess_cand *rcand = check->rcand;
+ if ((status == 120104 || status == 130054)/* CONNECTION RESET BY PEER */
+ && rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
+ /**
+ * This part of the code is triggered when using ICE over TCP via TURN.
+ * The other peer has to authorize this peer to connect to its relayed
+ * candidate; this is done by set_perm on the other side. From this side,
+ * we cannot know whether the peer has authorized us yet. If it has not,
+ * the connection gets a CONNECTION RESET BY PEER status. In that case,
+ * we retry the connection a bit later, and keep retrying until the check
+ * reaches its timeout.
+ */
+ check->state = PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY;
+ check_set_state(ice, check,PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY,
+ status);
+ return;
+ } else if (status != PJ_SUCCESS) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
+ on_check_complete(ice, check);
+ return;
+ }
+
+ // TCP is correctly connected. Craft the message to send
+ const pj_ice_sess_cand *lcand = check->lcand;
+ if (check->tdata == NULL) {
+ LOG5((ice->obj_name, "Error sending STUN request, empty data"));
+ return;
+ }
+ pj_ice_msg_data *msg_data =
+ PJ_POOL_ZALLOC_T(check->tdata->pool, pj_ice_msg_data);
+
+ msg_data->transport_id = transport_id;
+ msg_data->has_req_data = PJ_TRUE;
+ msg_data->data.req.ice = ice;
+ msg_data->data.req.clist = &ice->clist;
+ msg_data->data.req.ckid = current_check;
+
+ pj_ice_sess_comp *comp = find_comp(ice, lcand->comp_id);
+ pj_status_t status_send_msg;
+ // Note that USERNAME and MESSAGE-INTEGRITY will be added by the
+ // STUN session.
+
+ // Initiate STUN transaction to send the request
+ status_send_msg = pj_stun_session_send_msg(comp->stun_sess, msg_data,
+ PJ_FALSE, PJ_FALSE, &rcand->addr,
+ pj_sockaddr_get_len(&rcand->addr),
+ check->tdata);
+
+ if ((status_send_msg == 120104 || status_send_msg == 130054 /* CONNECTION RESET BY PEER */ || status_send_msg == 120032 /* BROKEN PIPE */)
+ && rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
+ /**
+ * This part of the code is triggered when using ICE over TCP via TURN.
+ * The other peer has to authorize this peer to connect to its relayed
+ * candidate; this is done by set_perm on the other side. From this side,
+ * we cannot know whether the peer has authorized us yet. If it has not,
+ * the connection gets a CONNECTION RESET BY PEER status. In that case,
+ * we retry the connection a bit later, and keep retrying until the check
+ * reaches its timeout.
+ */
+ check_set_state(ice, check,PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY,
+ status_send_msg);
+ } else if (status_send_msg == PJ_EBUSY /* EBUSY */) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
+ status_send_msg);
+ } else if (status_send_msg != PJ_SUCCESS) {
+ check->tdata = NULL;
+ pjnath_perror(ice->obj_name, "Error sending STUN request", status_send_msg);
+ pj_log_pop_indent();
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
+ on_check_complete(ice, check);
+ } else {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, status);
+ }
+}
+
+void ice_sess_on_peer_reset_connection(pj_ice_sess *ice,
+ pj_uint8_t transport_id,
+ pj_sockaddr_t* remote_addr)
+{
+ // The TCP link was reset.
+ if (!remote_addr)
+ return;
+
+ pj_ice_sess_check *check = get_current_check_at_state(ice, remote_addr,
+ PJ_ICE_SESS_CHECK_STATE_PENDING,
+ NULL);
+ if (!check) {
+ // Just check if it's not the first packet failing
+ check = get_current_check_at_state(ice, remote_addr,
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
+ NULL);
+ if (!check)
+ return;
+ }
+
+ const pj_ice_sess_cand *rcand = check->rcand;
+ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
+ check->state = PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY;
+ check_set_state(ice, check,
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY, 120104);
+ }
+}
+
+void ice_sess_on_peer_packet(pj_ice_sess *ice,
+ pj_uint8_t transport_id,
+ pj_sockaddr_t* remote_addr)
+{
+ // The TCP link received its bind request response
+ if (!ice || !remote_addr) {
+ return;
+ }
+ pj_ice_sess_check *check =
+ get_current_check_at_state(ice, remote_addr,
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
+ NULL);
+ if (!check) {
+ return;
+ }
+
+ const pj_ice_sess_cand *rcand = check->rcand;
+ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
+ check_set_state(ice, check,
+ PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, PJ_SUCCESS);
+ }
+}
/* This callback is called when outgoing STUN request completed */
static void on_stun_request_complete(pj_stun_session *stun_sess,
@@ -2411,7 +2799,9 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
&check->lcand->base_addr,
&check->lcand->base_addr,
pj_sockaddr_get_len(&xaddr->sockaddr),
- &cand_id);
+ &cand_id,
+ check->rcand->transport == PJ_CAND_UDP ?
+ PJ_CAND_UDP : PJ_CAND_TCP_PASSIVE);
if (status != PJ_SUCCESS) {
check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
status);
@@ -2474,11 +2864,7 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
/* Perform 7.1.2.2.2. Updating Pair States.
* This may terminate ICE processing.
*/
- if (on_check_complete(ice, check)) {
- /* ICE complete! */
- pj_grp_lock_release(ice->grp_lock);
- return;
- }
+ on_check_complete(ice, check);
pj_grp_lock_release(ice->grp_lock);
}
@@ -2673,7 +3059,9 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess,
msg_data->has_req_data = PJ_FALSE;
/* Send the response */
- status = pj_stun_session_send_msg(sess, msg_data, PJ_TRUE, PJ_TRUE,
+ status = pj_stun_session_send_msg(sess, msg_data, PJ_TRUE,
+ pj_stun_session_tp_type(sess) ==
+ PJ_STUN_TP_UDP,
src_addr, src_addr_len, tdata);
@@ -2794,12 +3182,12 @@ static void handle_incoming_check(pj_ice_sess *ice,
/* Just get candidate with the highest priority and same transport ID
* for the specified component ID in the checklist.
*/
- for (i=0; i<ice->clist.count; ++i) {
- pj_ice_sess_check *c = &ice->clist.checks[i];
- if (c->lcand->comp_id == rcheck->comp_id &&
- c->lcand->transport_id == rcheck->transport_id)
+ for (i=0; i<ice->lcand_cnt; ++i) {
+ pj_ice_sess_cand* lcand_tmp = &ice->lcand[i];
+ if (lcand_tmp->comp_id == rcheck->comp_id &&
+ lcand_tmp->transport_id == rcheck->transport_id)
{
- lcand = c->lcand;
+ lcand = lcand_tmp;
break;
}
}
diff --git a/pjnath/src/pjnath/ice_strans.c b/pjnath/src/pjnath/ice_strans.c
index 3cb350c2a..82175e9e8 100644
--- a/pjnath/src/pjnath/ice_strans.c
+++ b/pjnath/src/pjnath/ice_strans.c
@@ -69,6 +69,7 @@ enum tp_type
# define RELAY_PREF 0
#endif
+#define MAX_RTP_SIZE 65536
/* The candidate type preference when STUN candidate is used */
static pj_uint8_t srflx_pref_table[PJ_ICE_CAND_TYPE_MAX] =
@@ -87,6 +88,14 @@ static pj_uint8_t srflx_pref_table[PJ_ICE_CAND_TYPE_MAX] =
#endif
};
+//////////////////////////////////////////////////////////////////////////////
+
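+/* Read a 16-bit value in network byte order from two bytes that may live
+ * in separate buffers; used to parse the two-byte RFC 4571 length prefix
+ * of a framed packet.
+ */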
+static pj_uint16_t GETVAL16H(const pj_uint8_t *buf1, const pj_uint8_t *buf2)
+{
+ return (pj_uint16_t) ((buf1[0] << 8) | (buf2[0] << 0));
+}
+
+//////////////////////////////////////////////////////////////////////////////
/* ICE callbacks */
static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
@@ -103,6 +112,20 @@ static void ice_rx_data(pj_ice_sess *ice,
const pj_sockaddr_t *src_addr,
unsigned src_addr_len);
+#if PJ_HAS_TCP
+static pj_status_t ice_wait_tcp_connection(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id);
+
+static pj_status_t ice_reconnect_tcp_connection(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id);
+
+static pj_status_t ice_close_tcp_connection(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id);
+#endif
+
/* STUN socket callbacks */
/* Notification when incoming packet has been received. */
@@ -182,6 +205,16 @@ typedef struct pj_ice_strans_comp
} pj_ice_strans_comp;
+static pj_bool_t add_local_candidate(pj_ice_sess_cand *cand,
+ unsigned idx,
+ unsigned i,
+ unsigned *cand_cnt,
+ unsigned *max_cand_cnt,
+ pj_stun_sock_info stun_sock_info,
+ pj_ice_strans *ice_st,
+ pj_ice_strans_comp *comp,
+ pj_ice_cand_transport transport);
+
/* Pending send buffer */
typedef struct pending_send
{
@@ -225,6 +258,13 @@ struct pj_ice_strans
pj_bool_t destroy_req;/**< Destroy has been called? */
pj_bool_t cb_called; /**< Init error callback called?*/
pj_bool_t call_send_cb;/**< Need to call send cb? */
+
+ pj_uint8_t rtp_pkt[MAX_RTP_SIZE];
+ pj_uint8_t rx_buffer[MAX_RTP_SIZE];
+ pj_uint16_t rx_buffer_size;
+ pj_uint16_t rx_wanted_size;
+
+ pj_ssize_t last_data_len; /**< Length of the send the application is waiting on. */
};
@@ -261,6 +301,7 @@ PJ_DEF(void) pj_ice_strans_cfg_default(pj_ice_strans_cfg *cfg)
pj_bzero(cfg, sizeof(*cfg));
cfg->af = pj_AF_INET();
+ cfg->protocol = PJ_ICE_TP_UDP;
pj_stun_config_init(&cfg->stun_cfg, NULL, 0, NULL, NULL);
pj_ice_strans_stun_cfg_default(&cfg->stun);
pj_ice_strans_turn_cfg_default(&cfg->turn);
@@ -278,6 +319,7 @@ PJ_DEF(void) pj_ice_strans_stun_cfg_default(pj_ice_strans_stun_cfg *cfg)
pj_bzero(cfg, sizeof(*cfg));
cfg->af = pj_AF_INET();
+ cfg->conn_type = PJ_STUN_TP_UDP;
cfg->port = PJ_STUN_PORT;
cfg->max_host_cands = 64;
cfg->ignore_stun_error = PJ_FALSE;
@@ -421,6 +463,9 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st,
cand->transport_id = tp_id;
cand->comp_id = (pj_uint8_t) comp->comp_id;
new_cand = PJ_TRUE;
+ cand->transport = turn_cfg->conn_type == PJ_TURN_TP_UDP ?
+ PJ_CAND_UDP :
+ PJ_CAND_TCP_PASSIVE;
}
/* Allocate and initialize TURN socket data */
@@ -428,6 +473,10 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st,
data->comp = comp;
data->transport_id = cand->transport_id;
+ if (turn_cfg->conn_type == PJ_TURN_TP_TCP) {
+ turn_cfg->alloc_param.peer_conn_type = PJ_TURN_TP_TCP;
+ }
+
/* Create the TURN transport */
status = pj_turn_sock_create(&ice_st->cfg.stun_cfg, turn_cfg->af,
turn_cfg->conn_type,
@@ -465,7 +514,7 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st,
return PJ_SUCCESS;
}
-static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand,
+static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand,
pj_ice_sess_cand *rcand)
{
if (lcand == NULL && rcand == NULL){
@@ -474,23 +523,23 @@ static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand,
if (lcand == NULL || rcand == NULL){
return PJ_FALSE;
}
-
+
if (lcand->type != rcand->type
|| lcand->status != rcand->status
|| lcand->comp_id != rcand->comp_id
|| lcand->transport_id != rcand->transport_id
|| lcand->local_pref != rcand->local_pref
|| lcand->prio != rcand->prio
+ || lcand->transport != rcand->transport
|| pj_sockaddr_cmp(&lcand->addr, &rcand->addr) != 0
|| pj_sockaddr_cmp(&lcand->base_addr, &rcand->base_addr) != 0)
{
return PJ_FALSE;
}
-
+
return PJ_TRUE;
}
-
static pj_status_t add_stun_and_host(pj_ice_strans *ice_st,
pj_ice_strans_comp *comp,
unsigned idx,
@@ -541,6 +590,9 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st,
cand->local_pref = SRFLX_PREF;
cand->transport_id = CREATE_TP_ID(TP_STUN, idx);
cand->comp_id = (pj_uint8_t) comp->comp_id;
+ cand->transport = stun_cfg->conn_type == PJ_STUN_TP_UDP ?
+ PJ_CAND_UDP :
+ PJ_CAND_TCP_PASSIVE;
/* Allocate and initialize STUN socket data */
data = PJ_POOL_ZALLOC_T(ice_st->pool, sock_user_data);
@@ -549,8 +601,9 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st,
/* Create the STUN transport */
status = pj_stun_sock_create(&ice_st->cfg.stun_cfg, NULL,
- stun_cfg->af, &stun_sock_cb,
- sock_cfg, data, &comp->stun[idx].sock);
+ stun_cfg->af, stun_cfg->conn_type,
+ &stun_sock_cb, sock_cfg, data,
+ &comp->stun[idx].sock);
if (status != PJ_SUCCESS)
return status;
@@ -635,105 +688,154 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st,
}
for (i = 0; i < stun_sock_info.alias_cnt &&
- cand_cnt < stun_cfg->max_host_cands; ++i)
+ cand_cnt < stun_cfg->max_host_cands &&
+ status == PJ_SUCCESS; ++i)
{
- unsigned j;
- pj_bool_t cand_duplicate = PJ_FALSE;
- char addrinfo[PJ_INET6_ADDRSTRLEN+10];
- const pj_sockaddr *addr = &stun_sock_info.aliases[i];
+ status = !PJ_SUCCESS;
+ if (stun_sock_info.conn_type == PJ_STUN_TP_UDP) {
+ status = add_local_candidate(cand, idx, i,
+ &cand_cnt, &max_cand_cnt,
+ stun_sock_info, ice_st, comp,
+ PJ_CAND_UDP);
+ } else {
+ status = add_local_candidate(cand, idx, i,
+ &cand_cnt, &max_cand_cnt,
+ stun_sock_info, ice_st, comp,
+ PJ_CAND_TCP_PASSIVE);
+ /** RFC 6544, Section 4.1:
+ * First, agents SHOULD obtain host candidates as described in
+ * Section 5.1. Then, each agent SHOULD "obtain" (allocate a
+ * placeholder for) an active host candidate for each component of
+ * each TCP-capable media stream on each interface that the host
+ * has. The agent does not yet have to actually allocate a port for
+ * these candidates, but they are used for the creation of the check
+ * lists.
+ */
+ status = add_local_candidate(cand, idx, i,
+ &cand_cnt, &max_cand_cnt,
+ stun_sock_info, ice_st, comp,
+ PJ_CAND_TCP_ACTIVE);
+ }
+ }
+ }
- if (max_cand_cnt==0) {
- PJ_LOG(4,(ice_st->obj_name, "Too many host candidates"));
- break;
- }
+ return status;
+}
- /* Ignore loopback addresses if cfg->stun.loop_addr is unset */
- if (stun_cfg->loop_addr==PJ_FALSE) {
- if (stun_cfg->af == pj_AF_INET() &&
- (pj_ntohl(addr->ipv4.sin_addr.s_addr)>>24)==127)
- {
- continue;
- }
- else if (stun_cfg->af == pj_AF_INET6()) {
- pj_in6_addr in6addr = {{0}};
- in6addr.s6_addr[15] = 1;
- if (pj_memcmp(&in6addr, &addr->ipv6.sin6_addr,
- sizeof(in6addr))==0)
- {
- continue;
- }
- }
- }
+static pj_bool_t add_local_candidate(pj_ice_sess_cand *cand,
+ unsigned idx,
+ unsigned i,
+ unsigned *cand_cnt,
+ unsigned *max_cand_cnt,
+ pj_stun_sock_info stun_sock_info,
+ pj_ice_strans *ice_st,
+ pj_ice_strans_comp *comp,
+ pj_ice_cand_transport transport)
+{
+ unsigned j;
+ pj_bool_t cand_duplicate = PJ_FALSE;
+ char addrinfo[PJ_INET6_ADDRSTRLEN+10];
+ const pj_sockaddr *addr = &stun_sock_info.aliases[i];
+ pj_ice_strans_stun_cfg *stun_cfg = &ice_st->cfg.stun_tp[idx];
- /* Ignore IPv6 link-local address, unless it is the default
- * address (first alias).
- */
- if (stun_cfg->af == pj_AF_INET6() && i != 0) {
- const pj_in6_addr *a = &addr->ipv6.sin6_addr;
- if (a->s6_addr[0] == 0xFE && (a->s6_addr[1] & 0xC0) == 0x80)
- continue;
- }
- cand = &comp->cand_list[comp->cand_cnt];
-
- cand->type = PJ_ICE_CAND_TYPE_HOST;
- cand->status = PJ_SUCCESS;
- cand->local_pref = HOST_PREF;
- cand->transport_id = CREATE_TP_ID(TP_STUN, idx);
- cand->comp_id = (pj_uint8_t) comp->comp_id;
- pj_sockaddr_cp(&cand->addr, addr);
- pj_sockaddr_cp(&cand->base_addr, addr);
- pj_bzero(&cand->rel_addr, sizeof(cand->rel_addr));
-
- /* Check if not already in list */
- for (j=0; j<comp->cand_cnt; j++) {
- if (ice_cand_equals(cand, &comp->cand_list[j])) {
- cand_duplicate = PJ_TRUE;
- break;
- }
- }
+ if (*max_cand_cnt==0) {
+ PJ_LOG(4,(ice_st->obj_name, "Too many host candidates"));
+ return !PJ_SUCCESS;
+ }
- if (cand_duplicate) {
- PJ_LOG(4, (ice_st->obj_name,
- "Comp %d: host candidate %s (tpid=%d) is a duplicate",
- comp->comp_id, pj_sockaddr_print(&cand->addr, addrinfo,
- sizeof(addrinfo), 3), cand->transport_id));
+ /* Ignore loopback addresses if cfg->stun.loop_addr is unset */
+ if (stun_cfg->loop_addr==PJ_FALSE) {
+ if (stun_cfg->af == pj_AF_INET() &&
+ (pj_ntohl(addr->ipv4.sin_addr.s_addr)>>24)==127)
+ {
+ return PJ_SUCCESS;
+ }
+ else if (stun_cfg->af == pj_AF_INET6()) {
+ pj_in6_addr in6addr = {{0}};
+ in6addr.s6_addr[15] = 1;
+ if (pj_memcmp(&in6addr, &addr->ipv6.sin6_addr,
+ sizeof(in6addr))==0)
+ {
+ return PJ_SUCCESS;
+ }
+ }
+ }
- pj_bzero(&cand->addr, sizeof(cand->addr));
- pj_bzero(&cand->base_addr, sizeof(cand->base_addr));
- continue;
- } else {
- comp->cand_cnt+=1;
- cand_cnt++;
- max_cand_cnt--;
- }
-
- pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
- cand->type, &cand->base_addr);
+ /* Ignore IPv6 link-local address, unless it is the default
+ * address (first alias).
+ */
+ if (stun_cfg->af == pj_AF_INET6() && i != 0) {
+ const pj_in6_addr *a = &addr->ipv6.sin6_addr;
+ if (a->s6_addr[0] == 0xFE && (a->s6_addr[1] & 0xC0) == 0x80)
+ return PJ_SUCCESS;
+ }
- /* Set default candidate with the preferred default
- * address family
- */
- if (comp->ice_st->cfg.af != pj_AF_UNSPEC() &&
- addr->addr.sa_family == comp->ice_st->cfg.af &&
- comp->cand_list[comp->default_cand].base_addr.addr.sa_family !=
- ice_st->cfg.af)
- {
- comp->default_cand = (unsigned)(cand - comp->cand_list);
- }
+ cand = &comp->cand_list[comp->cand_cnt];
- PJ_LOG(4,(ice_st->obj_name,
- "Comp %d/%d: host candidate %s (tpid=%d) added",
- comp->comp_id, comp->cand_cnt-1,
- pj_sockaddr_print(&cand->addr, addrinfo,
- sizeof(addrinfo), 3),
- cand->transport_id));
- }
+ cand->type = PJ_ICE_CAND_TYPE_HOST;
+ cand->status = PJ_SUCCESS;
+ cand->local_pref = HOST_PREF;
+ cand->transport_id = CREATE_TP_ID(TP_STUN, idx);
+ cand->comp_id = (pj_uint8_t) comp->comp_id;
+ cand->transport = transport;
+
+ pj_sockaddr_cp(&cand->addr, addr);
+ pj_sockaddr_cp(&cand->base_addr, addr);
+ pj_bzero(&cand->rel_addr, sizeof(cand->rel_addr));
+
+ /* Check if not already in list */
+ for (j=0; j<comp->cand_cnt; j++) {
+ if (ice_cand_equals(cand, &comp->cand_list[j])) {
+ cand_duplicate = PJ_TRUE;
+ return !PJ_SUCCESS;
+ }
}
- return status;
-}
+ if (cand_duplicate) {
+ PJ_LOG(4, (ice_st->obj_name,
+ "Comp %d: host candidate %s (tpid=%d) is a duplicate",
+ comp->comp_id,
+ pj_sockaddr_print(&cand->addr,
+ addrinfo, sizeof(addrinfo), 3),
+ cand->transport_id));
+
+ pj_bzero(&cand->addr, sizeof(cand->addr));
+ pj_bzero(&cand->base_addr, sizeof(cand->base_addr));
+ return PJ_SUCCESS;
+ } else {
+ comp->cand_cnt+=1;
+ (*cand_cnt)++;
+ (*max_cand_cnt)--;
+ }
+
+ pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
+ cand->type, &cand->base_addr);
+ /* Set default candidate with the preferred default
+ * address family
+ */
+ if (comp->ice_st->cfg.af != pj_AF_UNSPEC() &&
+ addr->addr.sa_family == comp->ice_st->cfg.af &&
+ comp->cand_list[comp->default_cand].base_addr.addr.sa_family !=
+ ice_st->cfg.af)
+ {
+ comp->default_cand = (unsigned)(cand - comp->cand_list);
+ }
+
+ if (transport == PJ_CAND_TCP_ACTIVE) {
+ // Use the port 9 (DISCARD Protocol) for TCP active candidates.
+ pj_sockaddr_set_port(&cand->addr, 9);
+ }
+
+ PJ_LOG(4,(ice_st->obj_name,
+ "Comp %d/%d: host candidate %s (tpid=%d) added",
+ comp->comp_id, comp->cand_cnt-1,
+ pj_sockaddr_print(&cand->addr, addrinfo,
+ sizeof(addrinfo), 3),
+ cand->transport_id));
+ return PJ_SUCCESS;
+}
/*
* Create the component.
@@ -816,7 +918,7 @@ static pj_status_t alloc_send_buf(pj_ice_strans *ice_st, unsigned buf_size)
{
if (buf_size > ice_st->buf_size) {
unsigned i;
-
+
if (ice_st->is_pending) {
/* The current buffer is insufficient, but still currently used.*/
return PJ_EBUSY;
@@ -839,7 +941,7 @@ static pj_status_t alloc_send_buf(pj_ice_strans *ice_st, unsigned buf_size)
}
ice_st->buf_idx = ice_st->empty_idx = 0;
}
-
+
return PJ_SUCCESS;
}
@@ -906,7 +1008,7 @@ PJ_DEF(pj_status_t) pj_ice_strans_create( const char *name,
/* To maintain backward compatibility, check if old/deprecated setting is set
* and the new setting is not, copy the value to the new setting.
*/
- if (cfg->stun_tp_cnt == 0 &&
+ if (cfg->stun_tp_cnt == 0 &&
(cfg->stun.server.slen || cfg->stun.max_host_cands))
{
ice_st->cfg.stun_tp_cnt = 1;
@@ -1105,7 +1207,7 @@ static void sess_init_update(pj_ice_strans *ice_st)
pj_ice_get_cand_type_name(cand->type)));
return;
}
-
+
if (status == PJ_EUNKNOWN) {
status = cand->status;
} else {
@@ -1114,7 +1216,7 @@ static void sess_init_update(pj_ice_strans *ice_st)
status = PJ_SUCCESS;
}
}
-
+
if (status != PJ_SUCCESS)
break;
}
@@ -1207,6 +1309,11 @@ PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st,
ice_cb.on_ice_complete = &on_ice_complete;
ice_cb.on_rx_data = &ice_rx_data;
ice_cb.on_tx_pkt = &ice_tx_pkt;
+#if PJ_HAS_TCP
+ ice_cb.wait_tcp_connection = &ice_wait_tcp_connection;
+ ice_cb.reconnect_tcp_connection = &ice_reconnect_tcp_connection;
+ ice_cb.close_tcp_connection = &ice_close_tcp_connection;
+#endif
/* Create! */
status = pj_ice_sess_create(&ice_st->cfg.stun_cfg, ice_st->obj_name, role,
@@ -1282,7 +1389,8 @@ PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st,
&cand->foundation, &cand->addr,
&cand->base_addr, &cand->rel_addr,
pj_sockaddr_get_len(&cand->addr),
- (unsigned*)&ice_cand_id);
+ (unsigned*)&ice_cand_id,
+ cand->transport);
if (status != PJ_SUCCESS)
goto on_error;
}
@@ -1544,7 +1652,7 @@ pj_ice_strans_get_valid_pair(const pj_ice_strans *ice_st,
PJ_DEF(pj_status_t) pj_ice_strans_stop_ice(pj_ice_strans *ice_st)
{
PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
-
+
/* Protect with group lock, since this may cause race condition with
* pj_ice_strans_sendto2().
* See ticket #1877.
@@ -1578,7 +1686,7 @@ static pj_status_t use_buffer( pj_ice_strans *ice_st,
status = alloc_send_buf(ice_st, data_len);
if (status != PJ_SUCCESS)
return status;
-
+
if (ice_st->is_pending && ice_st->empty_idx == ice_st->buf_idx) {
/* We don't use buffer or there's no more empty buffer. */
return PJ_EBUSY;
@@ -1593,12 +1701,12 @@ static pj_status_t use_buffer( pj_ice_strans *ice_st,
pj_sockaddr_cp(&ice_st->send_buf[idx].dst_addr, dst_addr);
ice_st->send_buf[idx].dst_addr_len = dst_addr_len;
*buffer = ice_st->send_buf[idx].buffer;
-
+
if (ice_st->is_pending) {
/* We'll continue later since there's still a pending send. */
return PJ_EPENDING;
}
-
+
ice_st->is_pending = PJ_TRUE;
ice_st->buf_idx = idx;
@@ -1651,6 +1759,9 @@ static pj_status_t send_data(pj_ice_strans *ice_st,
}
}
+ def_cand = &comp->cand_list[comp->default_cand];
+ pj_bool_t add_header = def_cand->transport != PJ_CAND_UDP;
+
/* If ICE is available, send data with ICE, otherwise send with the
* default candidate selected during initialization.
*
@@ -1659,16 +1770,37 @@ static pj_status_t send_data(pj_ice_strans *ice_st,
*/
if (ice_st->ice && ice_st->state == PJ_ICE_STRANS_STATE_RUNNING) {
status = pj_ice_sess_send_data(ice_st->ice, comp_id, buf, data_len);
-
+
pj_grp_lock_release(ice_st->grp_lock);
-
+
goto on_return;
- }
+ }
pj_grp_lock_release(ice_st->grp_lock);
- def_cand = &comp->cand_list[comp->default_cand];
-
+ /* TCP: prepend the RFC 4571 framing header */
+ if (add_header) {
+ /*
+ * RFC 6544: ICE requires an agent to demultiplex STUN and
+ * application-layer traffic, since they appear on the same port. This
+ * demultiplexing is described in [RFC5245] and is done using the magic
+ * cookie and other fields of the message. Stream-oriented transports
+ * introduce another wrinkle, since they require a way to frame the
+ * connection so that the application and STUN packets can be extracted
+ * in order to differentiate STUN packets from application-layer
+ * traffic. For this reason, TCP media streams utilizing ICE use the
+ * basic framing provided in RFC 4571 [RFC4571], even if the application
+ * layer protocol is not RTP.
+ */
+ pj_uint8_t header_1 = data_len % 256; /* length, low byte */
+ pj_uint8_t header_0 = data_len >> 8; /* length, high byte */
+ pj_memcpy(&ice_st->rtp_pkt, &(header_0), sizeof(pj_uint8_t));
+ pj_memcpy(&ice_st->rtp_pkt[1], &(header_1), sizeof(pj_uint8_t));
+ pj_memcpy(&ice_st->rtp_pkt[2], (unsigned char *)data, data_len);
+ buf = &ice_st->rtp_pkt;
+ data_len += 2;
+ }
+
if (def_cand->status == PJ_SUCCESS) {
unsigned tp_idx = GET_TP_IDX(def_cand->transport_id);
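The two bytes prepended in the add_header branch above are the RFC 4571 framing: a 16-bit frame length in network byte order, followed by the payload. A minimal standalone sketch of that framing, with a hypothetical helper name (not part of this patch):

    /* Sketch: RFC 4571 framing -- 16-bit big-endian length, then payload.
     * `dst` must have room for len + 2 bytes; returns the framed size.
     */
    static pj_size_t rfc4571_frame(pj_uint8_t *dst,
                                   const pj_uint8_t *payload,
                                   pj_uint16_t len)
    {
        dst[0] = (pj_uint8_t)(len >> 8);    /* high byte */
        dst[1] = (pj_uint8_t)(len & 0xFF);  /* low byte */
        pj_memcpy(dst + 2, payload, len);
        return (pj_size_t)len + 2;
    }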
@@ -1730,6 +1862,11 @@ static pj_status_t send_data(pj_ice_strans *ice_st,
status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, buf,
(unsigned)data_len, 0, dest_addr,
dest_addr_len);
+ /* Do not count the framing header in the reported length */
+ if (add_header) {
+ data_len -= sizeof(pj_uint16_t);
+ }
+
goto on_return;
}
@@ -1738,8 +1875,14 @@ static pj_status_t send_data(pj_ice_strans *ice_st,
on_return:
/* We continue later in on_data_sent() callback. */
- if (status == PJ_EPENDING)
+ if (status == PJ_EPENDING) {
+ ice_st->last_data_len = data_len;
+ if (add_header) {
+ // The expected sent size includes the 2-byte framing header
+ ice_st->last_data_len += sizeof(pj_uint16_t);
+ }
return status;
+ }
if (call_cb) {
on_data_sent(ice_st, (status == PJ_SUCCESS? data_len: -status));
@@ -1771,7 +1914,7 @@ PJ_DEF(pj_status_t) pj_ice_strans_sendto( pj_ice_strans *ice_st,
dst_addr_len, PJ_TRUE, PJ_FALSE);
if (status == PJ_EPENDING)
status = PJ_SUCCESS;
-
+
return status;
}
#endif
@@ -1842,7 +1985,22 @@ static void on_ice_complete(pj_ice_sess *ice, pj_status_t status)
sizeof(lip), 3);
pj_sockaddr_print(&check->rcand->addr, rip,
sizeof(rip), 3);
-
+#if PJ_HAS_TCP
+ int idx = -1;
+ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i) {
+ if (ice_st->cfg.stun_tp[i].af ==
+ check->rcand->addr.addr.sa_family)
+ {
+ idx = i;
+ break;
+ }
+ }
+ if (idx == -1) {
+ PJ_LOG(4, (ice_st->obj_name,
+ "Comp %d: No STUN sock found.",
+ comp->comp_id));
+ }
+#endif
if (tp_typ == TP_TURN) {
/* Activate channel binding for the remote address
* for more efficient data transfer using TURN.
@@ -1936,6 +2094,29 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
pj_sockaddr_get_port(dst_addr),
tp_typ));
+ /* TCP: prepend the RFC 4571 framing header */
+ if (comp->ice_st->cfg.stun_tp->conn_type == PJ_STUN_TP_TCP) {
+ /*
+ * RFC 6544: ICE requires an agent to demultiplex STUN and
+ * application-layer traffic, since they appear on the same port. This
+ * demultiplexing is described in [RFC5245] and is done using the magic
+ * cookie and other fields of the message. Stream-oriented transports
+ * introduce another wrinkle, since they require a way to frame the
+ * connection so that the application and STUN packets can be extracted
+ * in order to differentiate STUN packets from application-layer
+ * traffic. For this reason, TCP media streams utilizing ICE use the
+ * basic framing provided in RFC 4571 [RFC4571], even if the application
+ * layer protocol is not RTP.
+ */
+ pj_uint8_t header_1 = size % 256; /* length, low byte */
+ pj_uint8_t header_0 = size >> 8; /* length, high byte */
+ pj_memcpy(&ice_st->rtp_pkt, &(header_0), sizeof(pj_uint8_t));
+ pj_memcpy(&ice_st->rtp_pkt[1], &(header_1), sizeof(pj_uint8_t));
+ pj_memcpy(&ice_st->rtp_pkt[2], (unsigned char *)pkt, size);
+ buf = &ice_st->rtp_pkt;
+ size += 2;
+ }
+
if (tp_typ == TP_TURN) {
if (comp->turn[tp_idx].sock) {
status = pj_turn_sock_sendto(comp->turn[tp_idx].sock,
@@ -1958,7 +2139,7 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
if (status != PJ_SUCCESS) {
goto on_return;
}
-
+
pj_sockaddr_cp(&comp->dst_addr, dst_addr);
comp->synth_addr_len = pj_sockaddr_get_len(&comp->synth_addr);
}
@@ -1969,9 +2150,13 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
dest_addr_len = dst_addr_len;
}
- status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL,
- buf, (unsigned)size, 0,
- dest_addr, dest_addr_len);
+ if (comp->stun[tp_idx].sock) {
+ status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL,
+ buf, (unsigned)size, 0,
+ dest_addr, dest_addr_len);
+ } else {
+ status = PJ_EINVALIDOP;
+ }
} else {
pj_assert(!"Invalid transport ID");
status = PJ_EINVALIDOP;
@@ -2017,7 +2202,7 @@ static void check_pending_send(pj_ice_strans *ice_st)
if (ice_st->num_buf > 0)
ice_st->buf_idx = (ice_st->buf_idx + 1) % ice_st->num_buf;
-
+
if (ice_st->num_buf > 0 && ice_st->buf_idx != ice_st->empty_idx) {
/* There's some pending send. Send it one by one. */
pending_send *ps = &ice_st->send_buf[ice_st->buf_idx];
@@ -2031,6 +2216,212 @@ static void check_pending_send(pj_ice_strans *ice_st)
}
}
+static void on_peer_connection(pj_stun_session* sess,
+ pj_status_t status,
+ pj_sockaddr_t* remote_addr)
+{
+
+ pj_stun_sock *stun_sock;
+ sock_user_data *data;
+ pj_ice_strans_comp *comp;
+ pj_ice_strans *ice_st;
+
+ stun_sock = (pj_stun_sock *)pj_stun_session_get_user_data(sess);
+ /* We have disassociated ourselves from the STUN session */
+ if (!stun_sock)
+ return;
+
+ data = (sock_user_data *)pj_stun_sock_get_user_data(stun_sock);
+ /* We have disassociated ourselves from the STUN socket */
+ if (!data)
+ return;
+
+ comp = data->comp;
+ ice_st = comp->ice_st;
+
+ /* Incorrect ICE */
+ if (!ice_st || !ice_st->ice)
+ return;
+
+ ice_sess_on_peer_connection(ice_st->ice,
+ data->transport_id, status, remote_addr);
+}
+
+static void on_peer_reset_connection(pj_stun_session* sess,
+ pj_sockaddr_t* remote_addr)
+{
+ pj_stun_sock *stun_sock;
+ sock_user_data *data;
+ pj_ice_strans_comp *comp;
+ pj_ice_strans *ice_st;
+
+ stun_sock = (pj_stun_sock *)pj_stun_session_get_user_data(sess);
+ /* We have disassociated ourselves from the STUN session */
+ if (!stun_sock)
+ return;
+
+ data = (sock_user_data *)pj_stun_sock_get_user_data(stun_sock);
+ /* We have disassociated ourselves from the STUN socket */
+ if (!data)
+ return;
+
+ comp = data->comp;
+ ice_st = comp->ice_st;
+
+ /* Incorrect ICE */
+ if (!ice_st || !ice_st->ice)
+ return;
+
+ ice_sess_on_peer_reset_connection(ice_st->ice,
+ data->transport_id, remote_addr);
+}
+
+static void on_peer_packet(pj_stun_session* sess, pj_sockaddr_t* remote_addr)
+{
+
+ if (!sess || !remote_addr)
+ return;
+
+ pj_stun_sock *stun_sock;
+ sock_user_data *data;
+ pj_ice_strans_comp *comp;
+ pj_ice_strans *ice_st;
+
+ stun_sock = (pj_stun_sock *)pj_stun_session_get_user_data(sess);
+ /* We have disassociated ourselves from the STUN session */
+ if (!stun_sock)
+ return;
+
+ data = (sock_user_data *)pj_stun_sock_get_user_data(stun_sock);
+ /* We have disassociated ourselves from the STUN socket */
+ if (!data)
+ return;
+
+ comp = data->comp;
+ if (!comp)
+ return;
+
+ ice_st = comp->ice_st;
+ /* Incorrect ICE */
+ if (!ice_st || !ice_st->ice)
+ return;
+
+ ice_sess_on_peer_packet(ice_st->ice, data->transport_id, remote_addr);
+}
+
+#if PJ_HAS_TCP
+static pj_status_t ice_wait_tcp_connection(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id)
+{
+ pj_ice_sess_check *check = &clist->checks[check_id];
+ const pj_ice_sess_cand *lcand = check->lcand;
+ const pj_ice_sess_cand *rcand = check->rcand;
+ pj_ice_strans *ice_st = (pj_ice_strans *)ice->user_data;
+ pj_ice_strans_comp *st_comp = ice_st->comp[lcand->comp_id - 1];
+
+ int idx = -1;
+ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i)
+ if (ice_st->cfg.stun_tp[i].af == rcand->addr.addr.sa_family) {
+ idx = i;
+ break;
+ }
+
+ if (idx == -1) {
+ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN sock found.",
+ st_comp->comp_id));
+ return PJ_EINVAL;
+ }
+ if (st_comp->stun[idx].sock) {
+ pj_stun_session *sess = pj_stun_sock_get_session(st_comp->stun[idx].sock);
+ if (!sess) {
+ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN session.",
+ st_comp->comp_id));
+ return PJ_EINVAL;
+ }
+ pj_stun_session_callback(sess)->on_peer_connection =
+ &on_peer_connection;
+ pj_stun_session_callback(sess)->on_peer_reset_connection =
+ &on_peer_reset_connection;
+ pj_stun_session_callback(sess)->on_peer_packet = &on_peer_packet;
+
+ return pj_stun_sock_connect_active(st_comp->stun[idx].sock,
+ &rcand->addr,
+ rcand->addr.addr.sa_family);
+ }
+
+ return PJ_EINVAL;
+}
+
+static pj_status_t ice_reconnect_tcp_connection(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id)
+{
+ pj_ice_sess_check *check = &clist->checks[check_id];
+ const pj_ice_sess_cand *lcand = check->lcand;
+ const pj_ice_sess_cand *rcand = check->rcand;
+ pj_ice_strans *ice_st = (pj_ice_strans *)ice->user_data;
+ pj_ice_strans_comp *st_comp = ice_st->comp[lcand->comp_id - 1];
+
+ int idx = -1;
+ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i)
+ if (ice_st->cfg.stun_tp[i].af == rcand->addr.addr.sa_family) {
+ idx = i;
+ break;
+ }
+
+ if (idx == -1) {
+ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN sock found.",
+ st_comp->comp_id));
+ return PJ_EINVAL;
+ }
+
+ if (st_comp->stun[idx].sock) {
+ pj_stun_session *sess = pj_stun_sock_get_session(st_comp->stun[idx].sock);
+ if (!sess) {
+ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN session.",
+ st_comp->comp_id));
+ return PJ_EINVAL;
+ }
+ pj_stun_session_callback(sess)->on_peer_connection =
+ &on_peer_connection;
+ pj_stun_session_callback(sess)->on_peer_reset_connection =
+ &on_peer_reset_connection;
+ pj_stun_session_callback(sess)->on_peer_packet = &on_peer_packet;
+ return pj_stun_sock_reconnect_active(st_comp->stun[idx].sock,
+ &rcand->addr,
+ rcand->addr.addr.sa_family);
+ }
+
+ return PJ_EINVAL;
+}
+
+static pj_status_t ice_close_tcp_connection(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id)
+{
+ pj_ice_sess_check *check = &clist->checks[check_id];
+ const pj_ice_sess_cand *lcand = check->lcand;
+ const pj_ice_sess_cand *rcand = check->rcand;
+ pj_ice_strans *ice_st = (pj_ice_strans *)ice->user_data;
+ pj_ice_strans_comp *st_comp = ice_st->comp[lcand->comp_id - 1];
+
+ int idx = -1;
+ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i)
+ if (ice_st->cfg.stun_tp[i].af == rcand->addr.addr.sa_family) {
+ idx = i;
+ break;
+ }
+
+ if (idx != -1 && st_comp->stun[idx].sock) {
+ return pj_stun_sock_close(st_comp->stun[idx].sock, &rcand->addr);
+ }
+
+ return PJ_EINVAL;
+}
+#endif
+
 /* Notification when asynchronous send operation via STUN/TURN
* has completed.
*/
@@ -2039,7 +2430,8 @@ static pj_bool_t on_data_sent(pj_ice_strans *ice_st, pj_ssize_t sent)
if (ice_st->destroy_req || !ice_st->is_pending)
return PJ_TRUE;
- if (ice_st->call_send_cb && ice_st->cb.on_data_sent) {
+ if (ice_st->call_send_cb && ice_st->cb.on_data_sent
+ && sent == ice_st->last_data_len /* Only app data should be announced */) {
(*ice_st->cb.on_data_sent)(ice_st, sent);
}
@@ -2196,7 +2588,7 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
{
/* We get an IPv4 mapped address for our IPv6
* host address.
- */
+ */
comp->ipv4_mapped = PJ_TRUE;
/* Find other host candidates with the same (IPv6)
@@ -2208,7 +2600,7 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
if (comp->cand_list[i].type != PJ_ICE_CAND_TYPE_HOST)
continue;
-
+
a1 = &comp->cand_list[i].addr;
a2 = &cand->base_addr;
if (pj_memcmp(pj_sockaddr_get_addr(a1),
@@ -2225,7 +2617,7 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
pj_sockaddr_cp(&cand->base_addr, &info.mapped_addr);
pj_sockaddr_cp(&cand->rel_addr, &info.mapped_addr);
}
-
+
/* Eliminate the srflx candidate if the address is
* equal to other (host) candidates.
*/
@@ -2268,11 +2660,11 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
sizeof(ipaddr), 3)));
sess_init_update(ice_st);
-
+
if (op == PJ_STUN_SOCK_MAPPED_ADDR_CHANGE &&
ice_st->cb.on_ice_complete)
{
- (*ice_st->cb.on_ice_complete)(ice_st,
+ (*ice_st->cb.on_ice_complete)(ice_st,
PJ_ICE_STRANS_OP_ADDR_CHANGE,
status);
}
@@ -2318,6 +2710,10 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
}
}
break;
+ case PJ_STUN_SESS_DESTROYED:
+ case PJ_STUN_TCP_CONNECT_ERROR:
+ default:
+ break;
}
return pj_grp_lock_dec_ref(ice_st->grp_lock)? PJ_FALSE : PJ_TRUE;
@@ -2358,14 +2754,103 @@ static void turn_on_rx_data(pj_turn_sock *turn_sock,
} else {
/* Hand over the packet to ICE */
- status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
- data->transport_id, pkt, pkt_len,
- peer_addr, addr_len);
+ if (comp->ice_st->cfg.turn_tp->conn_type == PJ_TURN_TP_TCP && pkt_len > 0) {
+ unsigned parsed = 0;
+ pj_status_t status;
- if (status != PJ_SUCCESS) {
- ice_st_perror(comp->ice_st,
- "Error processing packet from TURN relay",
- status);
+ do {
+ pj_uint16_t leftover = pkt_len - parsed;
+ pj_uint8_t *current_packet = ((pj_uint8_t *)(pkt)) + parsed;
+
+ /* RFC 6544: the payload is framed with the RFC 4571
+ * two-byte length prefix.
+ */
+ pj_bool_t store_remaining = PJ_TRUE;
+ if (comp->ice_st->rx_buffer_size ||
+ comp->ice_st->rx_wanted_size)
+ {
+ /* there is buffered state left over from the previous read */
+ if (comp->ice_st->rx_buffer_size == 1 && comp->ice_st->rx_wanted_size == 0) {
+ /* only the first byte of the length header was buffered;
+ * combine it with the first new byte to get the frame length */
+ leftover = GETVAL16H(comp->ice_st->rx_buffer,
+ current_packet);
+ /* adjust counters accordingly */
+ comp->ice_st->rx_buffer_size = 0;
+ current_packet++;
+ parsed++;
+
+ if (leftover + parsed <= pkt_len) {
+ /* The whole frame announced by the header is
+ * contained in this packet; consume it now.
+ */
+ store_remaining = PJ_FALSE;
+ parsed += leftover;
+ } else {
+ comp->ice_st->rx_wanted_size = leftover;
+ }
+ } else if (leftover + comp->ice_st->rx_buffer_size >=
+ comp->ice_st->rx_wanted_size)
+ {
+ /* The buffered bytes plus this packet complete the
+ * frame we were waiting for; rebuild and parse it.
+ */
+ store_remaining = PJ_FALSE;
+
+ pj_uint16_t eaten_bytes = comp->ice_st->rx_wanted_size -
+ comp->ice_st->rx_buffer_size;
+ pj_memcpy(comp->ice_st->rx_buffer +
+ comp->ice_st->rx_buffer_size,
+ current_packet, eaten_bytes);
+
+ leftover = comp->ice_st->rx_wanted_size;
+ current_packet = comp->ice_st->rx_buffer;
+ parsed += eaten_bytes;
+
+ comp->ice_st->rx_buffer_size = 0;
+ comp->ice_st->rx_wanted_size = 0;
+ }
+ } else if (leftover > 1) {
+ leftover = GETVAL16H(current_packet, current_packet+1);
+ current_packet += 2;
+ parsed += 2;
+ if (leftover + parsed <= pkt_len) {
+ store_remaining = PJ_FALSE;
+ parsed += leftover;
+ } else {
+ comp->ice_st->rx_wanted_size = leftover;
+ }
+ }
+
+ if (store_remaining) {
+ leftover = pkt_len - parsed;
+ pj_memcpy(comp->ice_st->rx_buffer +
+ comp->ice_st->rx_buffer_size,
+ current_packet, leftover);
+ comp->ice_st->rx_buffer_size += leftover;
+ status = PJ_SUCCESS;
+ break;
+ }
+
+ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
+ data->transport_id,
+ current_packet, leftover,
+ peer_addr, addr_len);
+ if (status != PJ_SUCCESS) {
+ ice_st_perror(comp->ice_st,
+ "Error processing packet from TURN relay",
+ status);
+ }
+ } while (parsed < pkt_len);
+ } else {
+ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
+ data->transport_id, pkt, pkt_len,
+ peer_addr, addr_len);
+ if (status != PJ_SUCCESS)
+ ice_st_perror(comp->ice_st,
+ "Error processing packet from TURN relay",
+ status);
}
}
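The loop above is a small de-framing state machine, and the same logic reappears in parse_rx_packet() in stun_sock.c below. Stripped of the buffered partial-header case, the core idea is the following hedged sketch (names are illustrative only):

    /* Hedged sketch (illustrative only): hand out complete RFC 4571
     * frames from one TCP read; anything left over must be buffered.
     */
    static void deframe_read(const pj_uint8_t *pkt, pj_size_t pkt_len,
                             void (*deliver)(const pj_uint8_t*, pj_uint16_t))
    {
        pj_size_t parsed = 0;
        while (parsed + 2 <= pkt_len) {
            /* 16-bit big-endian frame length */
            pj_uint16_t want = (pj_uint16_t)((pkt[parsed] << 8) |
                                             pkt[parsed + 1]);
            if (parsed + 2 + want > pkt_len)
                break;                         /* partial frame: buffer */
            (*deliver)(pkt + parsed + 2, want);
            parsed += 2 + want;
        }
        /* bytes in [parsed, pkt_len) wait for the next read */
    }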
diff --git a/pjnath/src/pjnath/nat_detect.c b/pjnath/src/pjnath/nat_detect.c
index db0de10bc..808342bec 100644
--- a/pjnath/src/pjnath/nat_detect.c
+++ b/pjnath/src/pjnath/nat_detect.c
@@ -329,7 +329,8 @@ PJ_DEF(pj_status_t) pj_stun_detect_nat_type2(const pj_sockaddr *server,
sess_cb.on_request_complete = &on_request_complete;
sess_cb.on_send_msg = &on_send_msg;
status = pj_stun_session_create(stun_cfg, pool->obj_name, &sess_cb,
- PJ_FALSE, sess->grp_lock, &sess->stun_sess);
+ PJ_FALSE, sess->grp_lock, &sess->stun_sess,
+ PJ_STUN_TP_UDP);
if (status != PJ_SUCCESS)
goto on_error;
@@ -875,7 +876,9 @@ static pj_status_t send_test(nat_detect_session *sess,
/* Send the request */
status = pj_stun_session_send_msg(sess->stun_sess, NULL, PJ_TRUE,
- PJ_TRUE, sess->cur_server,
+ (pj_stun_session_tp_type(sess->stun_sess) ==
+ PJ_STUN_TP_UDP),
+ sess->cur_server,
pj_sockaddr_get_len(sess->cur_server),
sess->result[test_id].tdata);
if (status != PJ_SUCCESS)
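The boolean computed here is the retransmission flag of pj_stun_session_send_msg(): over UDP the STUN transaction layer must retransmit, while over TCP reliability is left to the transport (RFC 5389 §7.2.2). With the new accessor, call sites stay transport-agnostic; roughly (sketch, illustrative):

    /* Choose STUN retransmission per transport. */
    pj_bool_t retransmit =
        (pj_stun_session_tp_type(sess->stun_sess) == PJ_STUN_TP_UDP);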
diff --git a/pjnath/src/pjnath/stun_session.c b/pjnath/src/pjnath/stun_session.c
index f2b4f7058..ed17b904f 100644
--- a/pjnath/src/pjnath/stun_session.c
+++ b/pjnath/src/pjnath/stun_session.c
@@ -49,6 +49,8 @@ struct pj_stun_session
pj_stun_tx_data pending_request_list;
pj_stun_tx_data cached_response_list;
+
+ pj_stun_tp_type conn_type;
};
#define SNAME(s_) ((s_)->pool->obj_name)
@@ -524,7 +526,8 @@ PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
const pj_stun_session_cb *cb,
pj_bool_t fingerprint,
pj_grp_lock_t *grp_lock,
- pj_stun_session **p_sess)
+ pj_stun_session **p_sess,
+ pj_stun_tp_type conn_type)
{
pj_pool_t *pool;
pj_stun_session *sess;
@@ -545,6 +548,7 @@ PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
pj_memcpy(&sess->cb, cb, sizeof(*cb));
sess->use_fingerprint = fingerprint;
sess->log_flag = 0xFFFF;
+ sess->conn_type = conn_type;
if (grp_lock) {
sess->grp_lock = grp_lock;
@@ -1538,3 +1542,12 @@ on_return:
return status;
}
+PJ_DEF(pj_stun_session_cb *) pj_stun_session_callback(pj_stun_session *sess)
+{
+ return sess ? &sess->cb : NULL;
+}
+
+PJ_DEF(pj_stun_tp_type) pj_stun_session_tp_type(pj_stun_session *sess)
+{
+ return sess ? sess->conn_type : PJ_STUN_TP_UDP;
+}
diff --git a/pjnath/src/pjnath/stun_sock.c b/pjnath/src/pjnath/stun_sock.c
index 5fe825cf5..e5b91dd45 100644
--- a/pjnath/src/pjnath/stun_sock.c
+++ b/pjnath/src/pjnath/stun_sock.c
@@ -40,6 +40,36 @@
enum { MAX_BIND_RETRY = 100 };
+#if PJ_HAS_TCP
+// RFC 4571 carries the packet length in a 16-bit header, so a framed
+// packet body is at most 65535 bytes; size the buffer accordingly.
+#define MAX_RTP_SIZE 65536
+#endif
+
+// TODO (sblin) The incoming socks are a bit HACKY for now.
+// Need a better approach
+typedef struct outgoing_sock {
+ pj_sock_t fd;
+ pj_activesock_t *sock;
+ pj_sockaddr_t *addr;
+} outgoing_sock;
+
+typedef struct incoming_sock {
+ pj_sock_t fd;
+ pj_activesock_t *sock;
+ pj_sockaddr addr;
+ int addr_len;
+} incoming_sock;
+
+typedef struct rx_buf {
+ pj_activesock_t *asock;
+ pj_uint8_t rx_buffer[MAX_RTP_SIZE];
+ pj_uint16_t rx_buffer_size;
+ pj_uint16_t rx_wanted_size;
+ struct rx_buf *next;
+ struct rx_buf *prev;
+} rx_buf;
+
struct pj_stun_sock
{
char *obj_name; /* Log identification */
@@ -47,6 +77,8 @@ struct pj_stun_sock
void *user_data; /* Application user data */
pj_bool_t is_destroying; /* Destroy already called */
int af; /* Address family */
+ pj_stun_tp_type conn_type;
+ pj_stun_sock_cfg cfg;
pj_stun_config stun_cfg; /* STUN config (ioqueue etc)*/
pj_stun_sock_cb cb; /* Application callbacks */
@@ -59,6 +91,13 @@ struct pj_stun_sock
pj_dns_srv_async_query *q; /* Pending DNS query */
pj_sock_t sock_fd; /* Socket descriptor */
pj_activesock_t *active_sock; /* Active socket object */
+#if PJ_HAS_TCP
+ int outgoing_nb;
+ outgoing_sock outgoing_socks[PJ_ICE_MAX_CHECKS];
+ int incoming_nb;
+ incoming_sock incoming_socks[PJ_ICE_MAX_CHECKS];
+ rx_buf *rx_buffers;
+#endif
pj_ioqueue_op_key_t send_key; /* Default send key for app */
pj_ioqueue_op_key_t int_send_key; /* Send key for internal */
pj_status_t last_err; /* Last error status */
@@ -68,6 +107,15 @@ struct pj_stun_sock
pj_grp_lock_t *grp_lock; /* Session group lock */
};
+//////////////////////////////////////////////////////////////////////////////
+
+/* Read a 16-bit big-endian value whose two bytes may live in different
+ * buffers, e.g. an RFC 4571 length prefix split across two TCP reads.
+ */
+static pj_uint16_t GETVAL16H(const pj_uint8_t *buf1, const pj_uint8_t *buf2)
+{
+ return (pj_uint16_t) ((buf1[0] << 8) | (buf2[0] << 0));
+}
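Taking two separate byte pointers matters because the length prefix can straddle a read boundary: the first byte may sit in rx_buffer while the second heads the next packet. For example (hypothetical values):

    /* Frame length 0x0102 (258) split across two TCP reads. */
    pj_uint8_t buffered = 0x01;              /* saved from the prior read */
    pj_uint8_t next_pkt[] = { 0x02, 0xAA };  /* start of the next read    */
    pj_uint16_t frame_len = GETVAL16H(&buffered, next_pkt);  /* 258 */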
+
+//////////////////////////////////////////////////////////////////////////////
+
/*
* Prototypes for static functions
*/
@@ -120,6 +168,24 @@ static void start_ka_timer(pj_stun_sock *stun_sock);
/* Keep-alive timer callback */
static void ka_timer_cb(pj_timer_heap_t *th, pj_timer_entry *te);
+
+static pj_bool_t on_stun_sock_ready(pj_activesock_t *asock,
+ pj_status_t status);
+
+static pj_bool_t on_stun_sock_accept(pj_activesock_t *asock,
+ pj_sock_t newsock,
+ const pj_sockaddr_t *src_addr,
+ int src_addr_len);
+
+static pj_bool_t on_connect_complete(pj_activesock_t *asock,
+ pj_status_t status);
+
+/* Notify application that session has failed */
+static pj_bool_t sess_fail(pj_stun_sock *stun_sock,
+ pj_stun_sock_op op,
+ pj_status_t status);
+
+
#define INTERNAL_MSG_TOKEN (void*)(pj_ssize_t)1
@@ -160,12 +226,183 @@ static pj_bool_t pj_stun_sock_cfg_is_valid(const pj_stun_sock_cfg *cfg)
return cfg->max_pkt_size > 1 && cfg->async_cnt >= 1;
}
+/*
+ * Initialize.
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_alloc(pj_stun_sock *stun_sock)
+{
+ pj_status_t status;
+ pj_sockaddr bound_addr;
+ pj_uint16_t max_bind_retry;
+ int sock_type;
+
+ pj_grp_lock_acquire(stun_sock->grp_lock);
+
+ if (stun_sock->conn_type == PJ_STUN_TP_UDP)
+ sock_type = pj_SOCK_DGRAM();
+ else
+ sock_type = pj_SOCK_STREAM();
+
+ stun_sock->ka_interval = stun_sock->cfg.ka_interval;
+ if (stun_sock->ka_interval == 0)
+ stun_sock->ka_interval = PJ_STUN_KEEP_ALIVE_SEC;
+ /* Create socket and bind socket */
+ status = pj_sock_socket(stun_sock->af, sock_type, 0, &stun_sock->sock_fd);
+ if (status != PJ_SUCCESS) {
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+
+ /* Apply QoS, if specified */
+ status = pj_sock_apply_qos2(stun_sock->sock_fd, stun_sock->cfg.qos_type,
+ &stun_sock->cfg.qos_params, 2,
+ stun_sock->obj_name, NULL);
+ if (status != PJ_SUCCESS && !stun_sock->cfg.qos_ignore_error) {
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+
+ /* Apply socket buffer size */
+ if (stun_sock->cfg.so_rcvbuf_size > 0) {
+ unsigned sobuf_size = stun_sock->cfg.so_rcvbuf_size;
+ status = pj_sock_setsockopt_sobuf(stun_sock->sock_fd, pj_SO_RCVBUF(),
+ PJ_TRUE, &sobuf_size);
+ if (status != PJ_SUCCESS) {
+ pj_perror(3, stun_sock->obj_name, status,
+ "Failed setting SO_RCVBUF");
+ } else {
+ if (sobuf_size < stun_sock->cfg.so_rcvbuf_size) {
+ PJ_LOG(4, (stun_sock->obj_name,
+ "Warning! Cannot set SO_RCVBUF as configured, "
+ "now=%d, configured=%d",
+ sobuf_size, stun_sock->cfg.so_rcvbuf_size));
+ } else {
+ PJ_LOG(5, (stun_sock->obj_name,
+ "SO_RCVBUF set to %d", sobuf_size));
+ }
+ }
+ }
+ if (stun_sock->cfg.so_sndbuf_size > 0) {
+ unsigned sobuf_size = stun_sock->cfg.so_sndbuf_size;
+ status = pj_sock_setsockopt_sobuf(stun_sock->sock_fd, pj_SO_SNDBUF(),
+ PJ_TRUE, &sobuf_size);
+ if (status != PJ_SUCCESS) {
+ pj_perror(3, stun_sock->obj_name, status,
+ "Failed setting SO_SNDBUF");
+ } else {
+ if (sobuf_size < stun_sock->cfg.so_sndbuf_size) {
+ PJ_LOG(4, (stun_sock->obj_name,
+ "Warning! Cannot set SO_SNDBUF as configured, "
+ "now=%d, configured=%d",
+ sobuf_size, stun_sock->cfg.so_sndbuf_size));
+ } else {
+ PJ_LOG(5, (stun_sock->obj_name,
+ "SO_SNDBUF set to %d", sobuf_size));
+ }
+ }
+ }
+
+ /* Bind socket */
+ max_bind_retry = MAX_BIND_RETRY;
+ if (stun_sock->cfg.port_range &&
+ stun_sock->cfg.port_range < max_bind_retry)
+ max_bind_retry = stun_sock->cfg.port_range;
+
+ pj_sockaddr_init(stun_sock->af, &bound_addr, NULL, 0);
+ if (stun_sock->cfg.bound_addr.addr.sa_family == pj_AF_INET() ||
+ stun_sock->cfg.bound_addr.addr.sa_family == pj_AF_INET6())
+ {
+ pj_sockaddr_cp(&bound_addr, &stun_sock->cfg.bound_addr);
+ }
+
+ status = pj_sock_bind_random(stun_sock->sock_fd, &bound_addr,
+ stun_sock->cfg.port_range, max_bind_retry);
+ if (status != PJ_SUCCESS) {
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+
+ /* Init active socket configuration */
+ {
+ pj_activesock_cfg activesock_cfg;
+ pj_activesock_cb activesock_cb;
+
+ pj_activesock_cfg_default(&activesock_cfg);
+ activesock_cfg.grp_lock = stun_sock->grp_lock;
+ activesock_cfg.async_cnt = stun_sock->cfg.async_cnt;
+ activesock_cfg.concurrency = 0;
+
+ /* Create the active socket */
+ pj_bzero(&activesock_cb, sizeof(activesock_cb));
+ activesock_cb.on_data_sent = &on_data_sent;
+ activesock_cb.on_data_recvfrom = &on_data_recvfrom;
+
+#if PJ_HAS_TCP
+ if (stun_sock->conn_type != PJ_STUN_TP_UDP) {
+ activesock_cb.on_accept_complete = &on_stun_sock_accept;
+ // Listen so the socket can accept incoming connections from peers
+ status = pj_sock_listen(stun_sock->sock_fd, PJ_SOMAXCONN);
+ if (status != PJ_SUCCESS) {
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+ } else {
+ activesock_cb.on_connect_complete = &on_stun_sock_ready;
+ }
+#else
+ activesock_cb.on_connect_complete = &on_stun_sock_ready;
+#endif
+
+ status = pj_activesock_create(stun_sock->pool, stun_sock->sock_fd,
+ sock_type, &activesock_cfg,
+ stun_sock->stun_cfg.ioqueue,
+ &activesock_cb, stun_sock,
+ &stun_sock->active_sock);
+ if (status != PJ_SUCCESS) {
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+
+#if PJ_HAS_TCP
+ if (stun_sock->conn_type != PJ_STUN_TP_UDP) {
+ status = pj_activesock_start_accept(stun_sock->active_sock,
+ stun_sock->pool);
+ } else {
+ status = PJ_SUCCESS;
+ }
+ if (status == PJ_SUCCESS) {
+ on_stun_sock_ready(stun_sock->active_sock, PJ_SUCCESS);
+ } else if (status != PJ_EPENDING) {
+ char addrinfo[PJ_INET6_ADDRSTRLEN + 10];
+ pj_perror(3, stun_sock->pool->obj_name, status,
+ "Failed to connect to %s",
+ pj_sockaddr_print(&bound_addr, addrinfo,
+ sizeof(addrinfo), 3));
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+#else
+ on_stun_sock_ready(stun_sock->active_sock, PJ_SUCCESS);
+#endif
+ }
+
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+}
+
/*
* Create the STUN transport using the specified configuration.
*/
PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
const char *name,
int af,
+ pj_stun_tp_type conn_type,
const pj_stun_sock_cb *cb,
const pj_stun_sock_cfg *cfg,
void *user_data,
@@ -174,22 +411,32 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
pj_pool_t *pool;
pj_stun_sock *stun_sock;
pj_stun_sock_cfg default_cfg;
- pj_sockaddr bound_addr;
- unsigned i;
- pj_uint16_t max_bind_retry;
pj_status_t status;
PJ_ASSERT_RETURN(stun_cfg && cb && p_stun_sock, PJ_EINVAL);
PJ_ASSERT_RETURN(af==pj_AF_INET()||af==pj_AF_INET6(), PJ_EAFNOTSUP);
PJ_ASSERT_RETURN(!cfg || pj_stun_sock_cfg_is_valid(cfg), PJ_EINVAL);
PJ_ASSERT_RETURN(cb->on_status, PJ_EINVAL);
+ PJ_ASSERT_RETURN(conn_type != PJ_STUN_TP_TCP || PJ_HAS_TCP, PJ_EINVAL);
status = pj_stun_config_check_valid(stun_cfg);
if (status != PJ_SUCCESS)
return status;
- if (name == NULL)
- name = "stuntp%p";
+ if (name == NULL) {
+ switch (conn_type) {
+ case PJ_STUN_TP_UDP:
+ name = "udpstun%p";
+ break;
+ case PJ_STUN_TP_TCP:
+ name = "tcpstun%p";
+ break;
+ default:
+ PJ_ASSERT_RETURN(!"Invalid STUN conn_type", PJ_EINVAL);
+ name = "tcpstun%p";
+ break;
+ }
+ }
if (cfg == NULL) {
pj_stun_sock_cfg_default(&default_cfg);
@@ -204,9 +451,16 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
stun_sock->obj_name = pool->obj_name;
stun_sock->user_data = user_data;
stun_sock->af = af;
+ stun_sock->conn_type = conn_type;
stun_sock->sock_fd = PJ_INVALID_SOCKET;
+#if PJ_HAS_TCP
+ stun_sock->outgoing_nb = -1;
+ stun_sock->incoming_nb = -1;
+#endif
pj_memcpy(&stun_sock->stun_cfg, stun_cfg, sizeof(*stun_cfg));
pj_memcpy(&stun_sock->cb, cb, sizeof(*cb));
+ /* Copy socket settings; QoS parameters etc */
+ pj_memcpy(&stun_sock->cfg, cfg, sizeof(*cfg));
stun_sock->ka_interval = cfg->ka_interval;
if (stun_sock->ka_interval == 0)
@@ -226,140 +480,68 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
pj_grp_lock_add_handler(stun_sock->grp_lock, pool, stun_sock,
&stun_sock_destructor);
- /* Create socket and bind socket */
- status = pj_sock_socket(af, pj_SOCK_DGRAM(), 0, &stun_sock->sock_fd);
- if (status != PJ_SUCCESS)
- goto on_error;
-
- /* Apply QoS, if specified */
- status = pj_sock_apply_qos2(stun_sock->sock_fd, cfg->qos_type,
- &cfg->qos_params, 2, stun_sock->obj_name,
- NULL);
- if (status != PJ_SUCCESS && !cfg->qos_ignore_error)
- goto on_error;
+ /* Create STUN session */
+ {
+ pj_stun_session_cb sess_cb;
- /* Apply socket buffer size */
- if (cfg->so_rcvbuf_size > 0) {
- unsigned sobuf_size = cfg->so_rcvbuf_size;
- status = pj_sock_setsockopt_sobuf(stun_sock->sock_fd, pj_SO_RCVBUF(),
- PJ_TRUE, &sobuf_size);
- if (status != PJ_SUCCESS) {
- PJ_PERROR(3, (stun_sock->obj_name, status,
- "Failed setting SO_RCVBUF"));
- } else {
- if (sobuf_size < cfg->so_rcvbuf_size) {
- PJ_LOG(4, (stun_sock->obj_name,
- "Warning! Cannot set SO_RCVBUF as configured, "
- "now=%d, configured=%d",
- sobuf_size, cfg->so_rcvbuf_size));
- } else {
- PJ_LOG(5, (stun_sock->obj_name, "SO_RCVBUF set to %d",
- sobuf_size));
- }
- }
- }
- if (cfg->so_sndbuf_size > 0) {
- unsigned sobuf_size = cfg->so_sndbuf_size;
- status = pj_sock_setsockopt_sobuf(stun_sock->sock_fd, pj_SO_SNDBUF(),
- PJ_TRUE, &sobuf_size);
+ pj_bzero(&sess_cb, sizeof(sess_cb));
+ sess_cb.on_request_complete = &sess_on_request_complete;
+ sess_cb.on_send_msg = &sess_on_send_msg;
+ status = pj_stun_session_create(&stun_sock->stun_cfg,
+ stun_sock->obj_name,
+ &sess_cb, PJ_FALSE,
+ stun_sock->grp_lock,
+ &stun_sock->stun_sess,
+ conn_type);
if (status != PJ_SUCCESS) {
- PJ_PERROR(3, (stun_sock->obj_name, status,
- "Failed setting SO_SNDBUF"));
- } else {
- if (sobuf_size < cfg->so_sndbuf_size) {
- PJ_LOG(4, (stun_sock->obj_name,
- "Warning! Cannot set SO_SNDBUF as configured, "
- "now=%d, configured=%d",
- sobuf_size, cfg->so_sndbuf_size));
- } else {
- PJ_LOG(5, (stun_sock->obj_name, "SO_SNDBUF set to %d",
- sobuf_size));
- }
+ pj_stun_sock_destroy(stun_sock);
+ return status;
}
}
- /* Bind socket */
- max_bind_retry = MAX_BIND_RETRY;
- if (cfg->port_range && cfg->port_range < max_bind_retry)
- max_bind_retry = cfg->port_range;
- pj_sockaddr_init(af, &bound_addr, NULL, 0);
- if (cfg->bound_addr.addr.sa_family == pj_AF_INET() ||
- cfg->bound_addr.addr.sa_family == pj_AF_INET6())
- {
- pj_sockaddr_cp(&bound_addr, &cfg->bound_addr);
- }
- status = pj_sock_bind_random(stun_sock->sock_fd, &bound_addr,
- cfg->port_range, max_bind_retry);
- if (status != PJ_SUCCESS)
- goto on_error;
+ /* pj_stun_sock_alloc() destroys the sock on failure */
+ status = pj_stun_sock_alloc(stun_sock);
+ if (status != PJ_SUCCESS)
+ return status;
- /* Create more useful information string about this transport */
-#if 0
- {
- pj_sockaddr bound_addr;
- int addr_len = sizeof(bound_addr);
+ /* Done */
+ *p_stun_sock = stun_sock;
+ return PJ_SUCCESS;
+}
- status = pj_sock_getsockname(stun_sock->sock_fd, &bound_addr,
- &addr_len);
- if (status != PJ_SUCCESS)
- goto on_error;
+/*
+ * Notification when outgoing TCP socket has been connected.
+ */
+static pj_bool_t on_stun_sock_ready(pj_activesock_t *asock, pj_status_t status)
+{
+ pj_stun_sock *stun_sock;
+ stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(asock);
+ if (!stun_sock)
+ return PJ_FALSE;
+
+ pj_grp_lock_acquire(stun_sock->grp_lock);
- stun_sock->info = pj_pool_alloc(pool, PJ_INET6_ADDRSTRLEN+10);
- pj_sockaddr_print(&bound_addr, stun_sock->info,
- PJ_INET6_ADDRSTRLEN, 3);
+ /* The STUN session may have already been destroyed here.
+ * See ticket #1557 (http://trac.pjsip.org/repos/ticket/1557).
+ */
+ if (!stun_sock->stun_sess) {
+ sess_fail(stun_sock, PJ_STUN_SESS_DESTROYED, status);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return PJ_FALSE;
}
-#endif
- /* Init active socket configuration */
- {
- pj_activesock_cfg activesock_cfg;
- pj_activesock_cb activesock_cb;
-
- pj_activesock_cfg_default(&activesock_cfg);
- activesock_cfg.grp_lock = stun_sock->grp_lock;
- activesock_cfg.async_cnt = cfg->async_cnt;
- activesock_cfg.concurrency = 0;
-
- /* Create the active socket */
- pj_bzero(&activesock_cb, sizeof(activesock_cb));
- activesock_cb.on_data_recvfrom = &on_data_recvfrom;
- activesock_cb.on_data_sent = &on_data_sent;
- status = pj_activesock_create(pool, stun_sock->sock_fd,
- pj_SOCK_DGRAM(),
- &activesock_cfg, stun_cfg->ioqueue,
- &activesock_cb, stun_sock,
- &stun_sock->active_sock);
- if (status != PJ_SUCCESS)
- goto on_error;
-
- /* Start asynchronous read operations */
- status = pj_activesock_start_recvfrom(stun_sock->active_sock, pool,
- cfg->max_pkt_size, 0);
- if (status != PJ_SUCCESS)
- goto on_error;
-
- /* Init send keys */
- pj_ioqueue_op_key_init(&stun_sock->send_key,
- sizeof(stun_sock->send_key));
- pj_ioqueue_op_key_init(&stun_sock->int_send_key,
- sizeof(stun_sock->int_send_key));
+ if (status != PJ_SUCCESS) {
+ sess_fail(stun_sock, PJ_STUN_TCP_CONNECT_ERROR, status);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return PJ_FALSE;
}
- /* Create STUN session */
- {
- pj_stun_session_cb sess_cb;
+ if (stun_sock->conn_type != PJ_STUN_TP_UDP)
+ PJ_LOG(5,(stun_sock->obj_name, "TCP connected"));
- pj_bzero(&sess_cb, sizeof(sess_cb));
- sess_cb.on_request_complete = &sess_on_request_complete;
- sess_cb.on_send_msg = &sess_on_send_msg;
- status = pj_stun_session_create(&stun_sock->stun_cfg,
- stun_sock->obj_name,
- &sess_cb, PJ_FALSE,
- stun_sock->grp_lock,
- &stun_sock->stun_sess);
- if (status != PJ_SUCCESS)
- goto on_error;
- }
+ /* Start asynchronous read operations */
+ pj_status_t result;
+ result = pj_activesock_start_recvfrom(asock, stun_sock->pool,
+ stun_sock->cfg.max_pkt_size, 0);
+ if (result != PJ_SUCCESS) {
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return PJ_FALSE;
+ }
/* Associate us with the STUN session */
pj_stun_session_set_user_data(stun_sock->stun_sess, stun_sock);
@@ -369,25 +551,305 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
* STUN messages we sent with STUN messages that the application sends.
* The last 16bit value in the array is a counter.
*/
+ unsigned i;
for (i=0; i<PJ_ARRAY_SIZE(stun_sock->tsx_id); ++i) {
stun_sock->tsx_id[i] = (pj_uint16_t) pj_rand();
}
stun_sock->tsx_id[5] = 0;
-
/* Init timer entry */
stun_sock->ka_timer.cb = &ka_timer_cb;
stun_sock->ka_timer.user_data = stun_sock;
- /* Done */
- *p_stun_sock = stun_sock;
- return PJ_SUCCESS;
+ if (status != PJ_SUCCESS) {
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return PJ_FALSE;
+ }
-on_error:
- pj_stun_sock_destroy(stun_sock);
- return status;
+ /* Init send keys */
+ pj_ioqueue_op_key_init(&stun_sock->send_key, sizeof(stun_sock->send_key));
+ pj_ioqueue_op_key_init(&stun_sock->int_send_key,
+ sizeof(stun_sock->int_send_key));
+
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return PJ_TRUE;
}
+static pj_bool_t parse_rx_packet(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ const pj_sockaddr_t *rx_addr,
+ unsigned sock_addr_len)
+{
+
+ pj_stun_sock *stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock);
+ if (!stun_sock)
+ return PJ_FALSE;
+
+ pj_grp_lock_acquire(stun_sock->grp_lock);
+ pj_uint16_t parsed = 0;
+ pj_bool_t result = PJ_TRUE;
+ pj_status_t status;
+
+#if PJ_HAS_TCP
+ // Search current rx_buf
+ rx_buf* buf = NULL;
+ rx_buf* stun_sock_buf = stun_sock->rx_buffers;
+ while (stun_sock_buf) {
+ if (stun_sock_buf->asock == asock) {
+ buf = stun_sock_buf;
+ break;
+ }
+ stun_sock_buf = stun_sock_buf->next;
+ }
+ if (!buf) {
+ // Create rx_buf, this buf will be released when the pool is released
+ buf = (rx_buf*)pj_pool_calloc(stun_sock->pool, 1, sizeof(rx_buf));
+ if (!buf) {
+ PJ_LOG(5, (stun_sock->obj_name, "Cannot allocate memory for rx_buf"));
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return PJ_FALSE;
+ }
+ buf->asock = asock;
+ buf->next = stun_sock->rx_buffers;
+ if (stun_sock->rx_buffers)
+ stun_sock->rx_buffers->prev = buf;
+ stun_sock->rx_buffers = buf;
+ }
+#endif
+
+ do {
+ pj_uint16_t leftover = size - parsed;
+ pj_uint8_t *current_packet = ((pj_uint8_t *)(data)) + parsed;
+
+#if PJ_HAS_TCP
+ if (stun_sock->conn_type != PJ_STUN_TP_UDP) {
+ /* RFC 6544: the payload is framed with the RFC 4571 length prefix */
+ pj_bool_t store_remaining = PJ_TRUE;
+ if (buf->rx_buffer_size != 0 || buf->rx_wanted_size != 0) {
+ if (buf->rx_buffer_size == 1 && buf->rx_wanted_size == 0) {
+ // Only the first byte of the length header was buffered;
+ // combine it with the first new byte to get the frame length.
+ leftover = GETVAL16H(buf->rx_buffer, current_packet);
+
+ buf->rx_buffer_size = 0;
+ current_packet++;
+ parsed++;
+
+ if (leftover + parsed <= size) {
+ store_remaining = PJ_FALSE;
+ parsed += leftover;
+ } else {
+ buf->rx_wanted_size = leftover;
+ }
+
+ } else if (leftover + buf->rx_buffer_size >= buf->rx_wanted_size) {
+ // We have enough data: rebuild the full frame and parse it
+ store_remaining = PJ_FALSE;
+ pj_uint16_t eaten_bytes = buf->rx_wanted_size - buf->rx_buffer_size;
+ pj_memcpy(buf->rx_buffer + buf->rx_buffer_size,
+ current_packet, eaten_bytes);
+
+ leftover = buf->rx_wanted_size;
+ current_packet = buf->rx_buffer;
+ parsed += eaten_bytes;
+
+ buf->rx_buffer_size = 0;
+ buf->rx_wanted_size = 0;
+ }
+ } else if (leftover > 1) {
+ leftover = GETVAL16H(current_packet, current_packet+1);
+ current_packet += 2;
+ parsed += 2;
+ if (leftover + parsed <= size) {
+ store_remaining = PJ_FALSE;
+ parsed += leftover;
+ } else {
+ buf->rx_wanted_size = leftover;
+ }
+ }
+ if (store_remaining) {
+ leftover = size - parsed;
+ pj_memcpy(buf->rx_buffer + buf->rx_buffer_size,
+ current_packet, leftover);
+ buf->rx_buffer_size += leftover;
+ break;
+ }
+ } else {
+#endif
+ parsed = size;
+#if PJ_HAS_TCP
+ }
+#endif
+ /* Check that this is STUN message */
+ status = pj_stun_msg_check((const pj_uint8_t *)current_packet, leftover,
+ PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET);
+ if (status != PJ_SUCCESS) {
+ /* Not STUN -- give it to application */
+ goto process_app_data;
+ }
+
+ /* Treat packet as STUN header and copy the STUN message type.
+ * We don't want to access the type directly from the header
+ * since it may not be properly aligned.
+ */
+ pj_stun_msg_hdr *hdr = (pj_stun_msg_hdr *)current_packet;
+ pj_uint16_t type;
+ pj_memcpy(&type, &hdr->type, 2);
+ type = pj_ntohs(type);
+
+ /* If the packet is a STUN Binding response and part of the
+ * transaction ID matches our internal ID, then this is
+ * our internal STUN message (Binding request or keep alive).
+ * Give it to our STUN session.
+ */
+ if (!PJ_STUN_IS_RESPONSE(type) ||
+ PJ_STUN_GET_METHOD(type) != PJ_STUN_BINDING_METHOD ||
+ pj_memcmp(hdr->tsx_id, stun_sock->tsx_id, 10) != 0)
+ {
+ /* Not STUN Binding response, or STUN transaction ID mismatch.
+ * This is not our message too -- give it to application.
+ */
+ goto process_app_data;
+ }
+
+ /* This is our STUN Binding response. Give it to the STUN session */
+ status = pj_stun_session_on_rx_pkt(stun_sock->stun_sess, current_packet,
+ leftover, PJ_STUN_IS_DATAGRAM, NULL,
+ NULL, rx_addr, sock_addr_len);
+
+ result &= status != PJ_EGONE ? PJ_TRUE : PJ_FALSE;
+ continue;
+
+process_app_data:
+ if (stun_sock->cb.on_rx_data)
+ (*stun_sock->cb.on_rx_data)(stun_sock, current_packet,
+ (unsigned)leftover, rx_addr, sock_addr_len);
+
+ result &= status != PJ_EGONE ? PJ_TRUE : PJ_FALSE;
+ } while (parsed < size && result);
+
+ status = pj_grp_lock_release(stun_sock->grp_lock);
+ return result;
+}
+
+static pj_bool_t on_data_read(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ pj_status_t status,
+ pj_size_t *remainder)
+{
+
+ pj_stun_sock *stun_sock;
+
+ if (!(stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(asock)))
+ return PJ_FALSE;
+
+ pj_stun_session_cb *cb = pj_stun_session_callback(stun_sock->stun_sess);
+ /* Log socket error or disconnection */
+ if (status != PJ_SUCCESS) {
+ if (stun_sock->conn_type == PJ_STUN_TP_UDP
+ || (status != PJ_EEOF
+ && status != 120104 /* PJ_STATUS_FROM_OS(ECONNRESET) */
+ && status != 130054 /* PJ_STATUS_FROM_OS(WSAECONNRESET) */))
+ {
+ PJ_PERROR(2, (stun_sock->obj_name, status, "read() error"));
+ } else if (status == 120104 /* connection reset by peer (POSIX) */
+ || status == 130054 /* connection reset by peer (Windows) */)
+ {
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i)
+ if (stun_sock->outgoing_socks[i].sock == asock
+ && cb
+ && (cb->on_peer_reset_connection))
+ {
+ (cb->on_peer_reset_connection)(stun_sock->stun_sess,
+ stun_sock->outgoing_socks[i].addr);
+ }
+ }
+ return PJ_FALSE;
+ }
+#if PJ_HAS_TCP
+ pj_sockaddr_t *rx_addr = NULL;
+ unsigned sock_addr_len = 0;
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i)
+ if (stun_sock->outgoing_socks[i].sock == asock) {
+ rx_addr = stun_sock->outgoing_socks[i].addr;
+ sock_addr_len = pj_sockaddr_get_len(rx_addr);
+ if (cb && (cb->on_peer_packet))
+ (cb->on_peer_packet)(stun_sock->stun_sess,
+ stun_sock->outgoing_socks[i].addr);
+ }
+
+ if (rx_addr == NULL && stun_sock->incoming_nb != -1) {
+ // It's an incoming message
+ for (int i = 0; i <= stun_sock->incoming_nb; ++i)
+ if (stun_sock->incoming_socks[i].sock == asock) {
+ rx_addr = &stun_sock->incoming_socks[i].addr;
+ sock_addr_len = stun_sock->incoming_socks[i].addr_len;
+ }
+ }
+ return parse_rx_packet(asock, data, size, rx_addr, sock_addr_len);
+#else
+ return PJ_FALSE;
+#endif
+}
+
+#if PJ_HAS_TCP
+/*
+ * Notification when incoming TCP socket has been connected.
+ * NOTE: see https://www.pjsip.org/docs/latest/pjlib/docs/html//structpj__activesock__cb.htm if the accept status is ever needed
+ */
+static pj_bool_t on_stun_sock_accept(pj_activesock_t *active_sock,
+ pj_sock_t sock,
+ const pj_sockaddr_t *src_addr,
+ int src_addr_len)
+{
+ pj_status_t status;
+ pj_stun_sock *stun_sock;
+ int sock_type = pj_SOCK_STREAM();
+ stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(active_sock);
+
+ stun_sock->incoming_nb += 1;
+ int nb_check = stun_sock->incoming_nb;
+ pj_sock_t *fd = &stun_sock->incoming_socks[nb_check].fd;
+ pj_activesock_t **asock = &stun_sock->incoming_socks[nb_check].sock;
+
+ pj_sockaddr_cp(&stun_sock->incoming_socks[nb_check].addr, src_addr);
+ stun_sock->incoming_socks[nb_check].addr_len = src_addr_len;
+ *fd = sock;
+
+ pj_activesock_cfg activesock_cfg;
+ pj_activesock_cb activesock_cb;
+
+ pj_activesock_cfg_default(&activesock_cfg);
+ activesock_cfg.grp_lock = stun_sock->grp_lock;
+ activesock_cfg.async_cnt = stun_sock->cfg.async_cnt;
+ activesock_cfg.concurrency = 0;
+
+ /* Create the active socket */
+ pj_bzero(&activesock_cb, sizeof(activesock_cb));
+ activesock_cb.on_data_read = &on_data_read;
+ activesock_cb.on_data_sent = &on_data_sent;
+
+ status = pj_activesock_create(stun_sock->pool, *fd, sock_type,
+ &activesock_cfg, stun_sock->stun_cfg.ioqueue,
+ &activesock_cb, stun_sock, asock);
+ if (status != PJ_SUCCESS) {
+ pj_stun_sock_destroy(stun_sock);
+ /* No group lock is held here; just stop accepting */
+ return PJ_FALSE;
+ }
+
+ /* Start asynchronous read operations */
+ pj_status_t result;
+ result = pj_activesock_start_read(*asock, stun_sock->pool,
+ stun_sock->cfg.max_pkt_size, 0);
+ if (result != PJ_SUCCESS)
+ return PJ_FALSE;
+
+ return PJ_TRUE;
+}
+#endif
+
/* Start socket. */
PJ_DEF(pj_status_t) pj_stun_sock_start( pj_stun_sock *stun_sock,
const pj_str_t *domain,
@@ -526,6 +988,26 @@ PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock)
stun_sock->sock_fd = PJ_INVALID_SOCKET;
}
+ for (int i = 0; i < stun_sock->incoming_nb ; ++i) {
+ if (stun_sock->incoming_socks[i].sock != NULL) {
+ stun_sock->incoming_socks[i].fd = PJ_INVALID_SOCKET;
+ pj_activesock_close(stun_sock->incoming_socks[i].sock);
+ } else if (stun_sock->incoming_socks[i].fd != PJ_INVALID_SOCKET) {
+ pj_sock_close(stun_sock->incoming_socks[i].fd);
+ stun_sock->incoming_socks[i].fd = PJ_INVALID_SOCKET;
+ }
+ }
+
+ for (int i = 0; i < stun_sock->outgoing_nb ; ++i) {
+ if (stun_sock->outgoing_socks[i].sock != NULL) {
+ stun_sock->outgoing_socks[i].fd = PJ_INVALID_SOCKET;
+ pj_activesock_close(stun_sock->outgoing_socks[i].sock);
+ } else if (stun_sock->outgoing_socks[i].fd != PJ_INVALID_SOCKET) {
+ pj_sock_close(stun_sock->outgoing_socks[i].fd);
+ stun_sock->outgoing_socks[i].fd = PJ_INVALID_SOCKET;
+ }
+ }
+
if (stun_sock->stun_sess) {
pj_stun_session_destroy(stun_sock->stun_sess);
}
@@ -634,10 +1116,12 @@ static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock)
/* Send request */
status=pj_stun_session_send_msg(stun_sock->stun_sess, INTERNAL_MSG_TOKEN,
- PJ_FALSE, PJ_TRUE, &stun_sock->srv_addr,
+ PJ_FALSE,
+ (stun_sock->conn_type == PJ_STUN_TP_UDP),
+ &stun_sock->srv_addr,
pj_sockaddr_get_len(&stun_sock->srv_addr),
tdata);
- if (status != PJ_SUCCESS)
+ /* Over TCP the send may complete asynchronously (PJ_EPENDING) */
+ if (status != PJ_SUCCESS && status != PJ_EPENDING)
goto on_error;
return PJ_SUCCESS;
@@ -658,6 +1142,8 @@ PJ_DEF(pj_status_t) pj_stun_sock_get_info( pj_stun_sock *stun_sock,
pj_grp_lock_acquire(stun_sock->grp_lock);
+ info->conn_type = stun_sock->conn_type;
+
/* Copy STUN server address and mapped address */
pj_memcpy(&info->srv_addr, &stun_sock->srv_addr,
sizeof(pj_sockaddr));
@@ -770,13 +1256,247 @@ PJ_DEF(pj_status_t) pj_stun_sock_sendto( pj_stun_sock *stun_sock,
send_key = &stun_sock->send_key;
size = pkt_len;
- status = pj_activesock_sendto(stun_sock->active_sock, send_key,
- pkt, &size, flag, dst_addr, addr_len);
+ if (stun_sock->conn_type == PJ_STUN_TP_UDP) {
+ status = pj_activesock_sendto(stun_sock->active_sock, send_key,
+ pkt, &size, flag, dst_addr, addr_len);
+ } else {
+#if PJ_HAS_TCP
+ pj_bool_t is_outgoing = PJ_FALSE;
+ pj_bool_t is_incoming = PJ_FALSE;
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) {
+ if (stun_sock->outgoing_socks[i].sock != NULL
+ && pj_sockaddr_cmp(stun_sock->outgoing_socks[i].addr, dst_addr) == 0) {
+ is_outgoing = PJ_TRUE;
+ status = pj_activesock_send(stun_sock->outgoing_socks[i].sock,
+ send_key, pkt, &size, flag);
+ break;
+ }
+ }
+ if (is_outgoing == PJ_FALSE) {
+ for (int i = 0 ; i <= stun_sock->incoming_nb; ++i) {
+ if (stun_sock->incoming_socks[i].sock != NULL
+ && pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr,
+ dst_addr) == 0) {
+ status = pj_activesock_send(stun_sock->incoming_socks[i].sock,
+ send_key, pkt, &size, flag);
+ is_incoming = PJ_TRUE;
+ break;
+ }
+ }
+ }
+ if (is_outgoing == PJ_FALSE && is_incoming == PJ_FALSE) {
+ status = pj_activesock_send(stun_sock->active_sock, send_key, pkt,
+ &size, flag);
+ }
+
+#endif
+ }
+
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+}
+
+#if PJ_HAS_TCP
+
+PJ_DEF(pj_status_t) pj_stun_sock_connect(pj_stun_sock *stun_sock,
+ const pj_sockaddr_t *remote_addr,
+ int af,
+ int nb_check)
+{
+
+ pj_grp_lock_acquire(stun_sock->grp_lock);
+ int sock_type = pj_SOCK_STREAM();
+
+ pj_sock_t *fd = &stun_sock->outgoing_socks[nb_check].fd;
+ pj_activesock_t **asock = &stun_sock->outgoing_socks[nb_check].sock;
+ pj_sockaddr_t **addr = &stun_sock->outgoing_socks[nb_check].addr;
+
+ pj_status_t status = pj_sock_socket(af, sock_type, 0, fd);
+ if (status != PJ_SUCCESS) {
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+
+ /* Apply QoS, if specified */
+ status = pj_sock_apply_qos2(*fd, stun_sock->cfg.qos_type,
+ &stun_sock->cfg.qos_params, 2, stun_sock->obj_name, NULL);
+ if (status != PJ_SUCCESS && !stun_sock->cfg.qos_ignore_error) {
+ pj_stun_sock_destroy(stun_sock);
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+
+ /* Apply socket buffer size */
+ if (stun_sock->cfg.so_rcvbuf_size > 0) {
+ unsigned sobuf_size = stun_sock->cfg.so_rcvbuf_size;
+ status = pj_sock_setsockopt_sobuf(*fd, pj_SO_RCVBUF(), PJ_TRUE, &sobuf_size);
+ if (status != PJ_SUCCESS) {
+ pj_perror(3, stun_sock->obj_name, status, "Failed setting SO_RCVBUF");
+ } else {
+ if (sobuf_size < stun_sock->cfg.so_rcvbuf_size) {
+ PJ_LOG(4, (stun_sock->obj_name,
+ "Warning! Cannot set SO_RCVBUF as configured, "
+ "now=%d, configured=%d",
+ sobuf_size, stun_sock->cfg.so_rcvbuf_size));
+ } else {
+ PJ_LOG(5, (stun_sock->obj_name, "SO_RCVBUF set to %d", sobuf_size));
+ }
+ }
+ }
+
+ if (stun_sock->cfg.so_sndbuf_size > 0) {
+ unsigned sobuf_size = stun_sock->cfg.so_sndbuf_size;
+ status = pj_sock_setsockopt_sobuf(*fd, pj_SO_SNDBUF(), PJ_TRUE, &sobuf_size);
+ if (status != PJ_SUCCESS) {
+ pj_perror(3, stun_sock->obj_name, status, "Failed setting SO_SNDBUF");
+ } else {
+ if (sobuf_size < stun_sock->cfg.so_sndbuf_size) {
+ PJ_LOG(4, (stun_sock->obj_name,
+ "Warning! Cannot set SO_SNDBUF as configured, "
+ "now=%d, configured=%d",
+ sobuf_size, stun_sock->cfg.so_sndbuf_size));
+ } else {
+ PJ_LOG(5, (stun_sock->obj_name, "SO_SNDBUF set to %d", sobuf_size));
+ }
+ }
+ }
+
+ /* Init active socket configuration */
+ {
+ pj_activesock_cfg activesock_cfg;
+ pj_activesock_cb activesock_cb;
+
+ pj_activesock_cfg_default(&activesock_cfg);
+ activesock_cfg.grp_lock = stun_sock->grp_lock;
+ activesock_cfg.async_cnt = stun_sock->cfg.async_cnt;
+ activesock_cfg.concurrency = 0;
+
+ /* Create the active socket */
+ pj_bzero(&activesock_cb, sizeof(activesock_cb));
+ activesock_cb.on_data_read = &on_data_read;
+ activesock_cb.on_data_sent = &on_data_sent;
+ activesock_cb.on_connect_complete = &on_connect_complete;
+
+ status = pj_activesock_create(stun_sock->pool, *fd,
+ sock_type, &activesock_cfg,
+ stun_sock->stun_cfg.ioqueue, &activesock_cb,
+ stun_sock, asock);
+
+ if (status != PJ_SUCCESS) {
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+
+ *addr = (pj_sockaddr_t*)remote_addr;
+
+ status = pj_activesock_start_connect(
+ *asock, stun_sock->pool, *addr,
+ pj_sockaddr_get_len(*addr));
+ if (status == PJ_SUCCESS) {
+ on_connect_complete(*asock, status);
+ } else if (status != PJ_EPENDING) {
+ char addrinfo[PJ_INET6_ADDRSTRLEN+8];
+ pj_perror(3, stun_sock->pool->obj_name, status, "Failed to connect to %s",
+ pj_sockaddr_print(*addr, addrinfo, sizeof(addrinfo), 3));
+ pj_grp_lock_release(stun_sock->grp_lock);
+ return status;
+ }
+ }
pj_grp_lock_release(stun_sock->grp_lock);
return status;
}
+PJ_DEF(pj_status_t) pj_stun_sock_connect_active(pj_stun_sock *stun_sock,
+ const pj_sockaddr_t *remote_addr,
+ int af)
+{
+
+ if (stun_sock->incoming_nb != -1) {
+ // If the remote address matches an already-accepted incoming
+ // connection, we are effectively connected (mainly for PRFLX
+ // candidates); report success right away.
+ for (int i = 0 ; i <= stun_sock->incoming_nb; ++i) {
+ if (stun_sock->incoming_socks[i].sock != NULL
+ && pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, remote_addr)==0) {
+ pj_stun_session_cb *cb =
+ pj_stun_session_callback(stun_sock->stun_sess);
+ (cb->on_peer_connection)(stun_sock->stun_sess, PJ_SUCCESS,
+ (pj_sockaddr_t *)remote_addr);
+ return PJ_SUCCESS;
+ }
+ }
+ }
+
+ /* Create socket and bind socket */
+ stun_sock->outgoing_nb += 1;
+ int nb_check = stun_sock->outgoing_nb;
+ return pj_stun_sock_connect(stun_sock, remote_addr, af, nb_check);
+
+}
+
+PJ_DEF(pj_status_t) pj_stun_sock_reconnect_active(pj_stun_sock *stun_sock,
+ const pj_sockaddr_t *remote_addr,
+ int af)
+{
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) {
+ if (stun_sock->outgoing_socks[i].sock != NULL
+ && pj_sockaddr_cmp(stun_sock->outgoing_socks[i].addr, remote_addr) == 0) {
+ pj_activesock_close(stun_sock->outgoing_socks[i].sock);
+ return pj_stun_sock_connect(stun_sock, remote_addr, af, i);
+ }
+ }
+ return PJ_EINVAL;
+}
+
+PJ_DEF(pj_status_t) pj_stun_sock_close(pj_stun_sock *stun_sock,
+ const pj_sockaddr_t *remote_addr)
+{
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) {
+ if (stun_sock->outgoing_socks[i].sock != NULL
+ && pj_sockaddr_cmp(stun_sock->outgoing_socks[i].addr, remote_addr) == 0) {
+ return pj_activesock_close(stun_sock->outgoing_socks[i].sock);
+ }
+ }
+
+ for (int i = 0; i <= stun_sock->incoming_nb; ++i) {
+ if (stun_sock->incoming_socks[i].sock != NULL
+ && pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, remote_addr) == 0) {
+ return pj_activesock_close(stun_sock->incoming_socks[i].sock);
+ }
+ }
+ return PJ_EINVAL;
+}
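Together, connect_active/reconnect_active/close give the ICE session the full TCP check lifecycle, driven through the wait/reconnect/close callbacks installed in ice_strans.c above. A hedged usage sketch (`sock`, `rcand_addr` and the retry condition are illustrative; the called functions are the ones added by this patch):

    /* Drive one outgoing RFC 6544 TCP check on a TCP stun_sock. */
    pj_stun_session *sess = pj_stun_sock_get_session(sock);
    pj_stun_session_callback(sess)->on_peer_connection = &on_peer_connection;

    /* Open the connection; the result arrives in on_peer_connection(). */
    status = pj_stun_sock_connect_active(sock, &rcand_addr, pj_AF_INET());

    /* On "connection reset by peer", retry on the same outgoing slot. */
    if (reset_by_peer)
        status = pj_stun_sock_reconnect_active(sock, &rcand_addr,
                                               pj_AF_INET());

    /* Once the check (or the session) is finished, drop the connection. */
    pj_stun_sock_close(sock, &rcand_addr);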
+
+static pj_bool_t on_connect_complete(pj_activesock_t *asock, pj_status_t status)
+{
+ pj_stun_sock *stun_sock;
+ stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(asock);
+
+ pj_status_t result = pj_activesock_start_read(asock, stun_sock->pool,
+ stun_sock->cfg.max_pkt_size, 0);
+ if (result != PJ_SUCCESS) {
+ return PJ_FALSE;
+ }
+
+ pj_stun_session_cb *cb = pj_stun_session_callback(stun_sock->stun_sess);
+ if (!cb->on_peer_connection) {
+ return PJ_FALSE;
+ }
+
+ // Get remote connected address
+ pj_sockaddr_t* remote_addr = NULL;
+ for (int i = 0 ; i <= stun_sock->outgoing_nb ; ++i) {
+ if (stun_sock->outgoing_socks[i].sock == asock) {
+ remote_addr = stun_sock->outgoing_socks[i].addr;
+ }
+ }
+ if (!remote_addr) return PJ_FALSE;
+ (cb->on_peer_connection)(stun_sock->stun_sess, status, remote_addr);
+ return PJ_TRUE;
+}
+
+#endif
+
/* This callback is called by the STUN session to send packet */
static pj_status_t sess_on_send_msg(pj_stun_session *sess,
void *token,
@@ -787,6 +1507,7 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess,
{
pj_stun_sock *stun_sock;
pj_ssize_t size;
+ pj_status_t status;
stun_sock = (pj_stun_sock *) pj_stun_session_get_user_data(sess);
if (!stun_sock || !stun_sock->active_sock) {
@@ -800,9 +1521,30 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess,
PJ_UNUSED_ARG(token);
size = pkt_size;
- return pj_activesock_sendto(stun_sock->active_sock,
- &stun_sock->int_send_key,
- pkt, &size, 0, dst_addr, addr_len);
+ if (stun_sock->conn_type == PJ_STUN_TP_UDP) {
+ status = pj_activesock_sendto(stun_sock->active_sock,
+ &stun_sock->int_send_key,pkt, &size, 0,
+ dst_addr, addr_len);
+ }
+#if PJ_HAS_TCP
+ else {
+ for (int i = 0 ; i <= stun_sock->incoming_nb; ++i) {
+ if (stun_sock->incoming_socks[i].sock != NULL
+ && !pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, dst_addr)) {
+ status = pj_activesock_send(stun_sock->incoming_socks[i].sock,
+ &stun_sock->int_send_key,
+ pkt, &size, 0);
+ if (status != PJ_SUCCESS && status != PJ_EPENDING)
+ PJ_PERROR(4,(stun_sock->obj_name, status,
+ "Error sending answer on incoming_sock(s)"));
+ }
+ }
+ /* Last resort: also try the main active socket */
+ status = pj_activesock_send(stun_sock->active_sock,
+ &stun_sock->int_send_key, pkt, &size, 0);
+ }
+#endif
+ return status;
}
/* This callback is called by the STUN session when outgoing transaction
@@ -942,8 +1684,6 @@ static pj_bool_t on_data_recvfrom(pj_activesock_t *asock,
pj_status_t status)
{
pj_stun_sock *stun_sock;
- pj_stun_msg_hdr *hdr;
- pj_uint16_t type;
stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock);
if (!stun_sock)
@@ -955,58 +1695,7 @@ static pj_bool_t on_data_recvfrom(pj_activesock_t *asock,
return PJ_TRUE;
}
- pj_grp_lock_acquire(stun_sock->grp_lock);
-
- /* Check that this is STUN message */
- status = pj_stun_msg_check((const pj_uint8_t*)data, size,
- PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET);
- if (status != PJ_SUCCESS) {
- /* Not STUN -- give it to application */
- goto process_app_data;
- }
-
- /* Treat packet as STUN header and copy the STUN message type.
- * We don't want to access the type directly from the header
- * since it may not be properly aligned.
- */
- hdr = (pj_stun_msg_hdr*) data;
- pj_memcpy(&type, &hdr->type, 2);
- type = pj_ntohs(type);
-
- /* If the packet is a STUN Binding response and part of the
- * transaction ID matches our internal ID, then this is
- * our internal STUN message (Binding request or keep alive).
- * Give it to our STUN session.
- */
- if (!PJ_STUN_IS_RESPONSE(type) ||
- PJ_STUN_GET_METHOD(type) != PJ_STUN_BINDING_METHOD ||
- pj_memcmp(hdr->tsx_id, stun_sock->tsx_id, 10) != 0)
- {
- /* Not STUN Binding response, or STUN transaction ID mismatch.
- * This is not our message too -- give it to application.
- */
- goto process_app_data;
- }
-
- /* This is our STUN Binding response. Give it to the STUN session */
- status = pj_stun_session_on_rx_pkt(stun_sock->stun_sess, data, size,
- PJ_STUN_IS_DATAGRAM, NULL, NULL,
- src_addr, addr_len);
-
- status = pj_grp_lock_release(stun_sock->grp_lock);
-
- return status!=PJ_EGONE ? PJ_TRUE : PJ_FALSE;
-
-process_app_data:
- if (stun_sock->cb.on_rx_data) {
- (*stun_sock->cb.on_rx_data)(stun_sock, data, (unsigned)size,
- src_addr, addr_len);
- status = pj_grp_lock_release(stun_sock->grp_lock);
- return status!=PJ_EGONE ? PJ_TRUE : PJ_FALSE;
- }
-
- status = pj_grp_lock_release(stun_sock->grp_lock);
- return status!=PJ_EGONE ? PJ_TRUE : PJ_FALSE;
+ return parse_rx_packet(asock, data, size, src_addr, addr_len);
}
/* Callback from active socket about send status */
@@ -1047,3 +1736,8 @@ static pj_bool_t on_data_sent(pj_activesock_t *asock,
return PJ_TRUE;
}
+pj_stun_session* pj_stun_sock_get_session(pj_stun_sock *stun_sock)
+{
+ return stun_sock ? stun_sock->stun_sess : NULL;
+}
+
diff --git a/pjnath/src/pjnath/stun_transaction.c b/pjnath/src/pjnath/stun_transaction.c
index e4d67db0f..569b4826d 100644
--- a/pjnath/src/pjnath/stun_transaction.c
+++ b/pjnath/src/pjnath/stun_transaction.c
@@ -396,6 +396,9 @@ static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
PJ_DEF(pj_status_t) pj_stun_client_tsx_retransmit(pj_stun_client_tsx *tsx,
pj_bool_t mod_count)
{
+ if (!tsx)
+ return PJ_EINVAL;
+
if (tsx->destroy_timer.id != 0) {
return PJ_SUCCESS;
}
diff --git a/pjnath/src/pjnath/turn_session.c b/pjnath/src/pjnath/turn_session.c
index a378b8672..88985af69 100644
--- a/pjnath/src/pjnath/turn_session.c
+++ b/pjnath/src/pjnath/turn_session.c
@@ -311,7 +311,8 @@ PJ_DEF(pj_status_t) pj_turn_session_create( const pj_stun_config *cfg,
stun_cb.on_request_complete = &stun_on_request_complete;
stun_cb.on_rx_indication = &stun_on_rx_indication;
status = pj_stun_session_create(&sess->stun_cfg, sess->obj_name, &stun_cb,
- PJ_FALSE, sess->grp_lock, &sess->stun);
+ PJ_FALSE, sess->grp_lock, &sess->stun,
+ conn_type);
if (status != PJ_SUCCESS) {
do_destroy(sess);
return status;
diff --git a/pjnath/src/pjnath/turn_sock.c b/pjnath/src/pjnath/turn_sock.c
index dc6304d9f..a6e192e9f 100644
--- a/pjnath/src/pjnath/turn_sock.c
+++ b/pjnath/src/pjnath/turn_sock.c
@@ -894,12 +894,7 @@ static pj_bool_t on_data_sent(pj_turn_sock *turn_sock,
}
if (turn_sock->cb.on_data_sent) {
- pj_ssize_t header_len, sent_size;
-
- /* Remove the length of packet header from sent size. */
- header_len = turn_sock->pkt_len - turn_sock->body_len;
- sent_size = (sent > header_len)? (sent - header_len) : 0;
- (*turn_sock->cb.on_data_sent)(turn_sock, sent_size);
+ (*turn_sock->cb.on_data_sent)(turn_sock, sent);
}
return PJ_TRUE;
@@ -1766,3 +1761,20 @@ static void turn_on_connection_bind_status(pj_turn_session *sess,
peer_addr, addr_len);
}
}
+
+pj_bool_t pj_turn_sock_has_dataconn(pj_turn_sock *turn_sock,
+ const pj_sockaddr_t *peer)
+{
+ if (!turn_sock) return PJ_FALSE;
+
+ for (int i = 0; i < turn_sock->data_conn_cnt; ++i) {
+ /* &data_conn[i] is never NULL; slots below data_conn_cnt are in use. */
+ tcp_data_conn_t *dataconn = &turn_sock->data_conn[i];
+ if (pj_sockaddr_cmp(&dataconn->peer_addr, peer) == 0)
+ return PJ_TRUE;
+ }
+
+ return PJ_FALSE;
+}
diff --git a/pjnath/src/pjturn-client/client_main.c b/pjnath/src/pjturn-client/client_main.c
index 6f9f1ff1a..e56c510c8 100644
--- a/pjnath/src/pjturn-client/client_main.c
+++ b/pjnath/src/pjturn-client/client_main.c
@@ -155,7 +155,7 @@ static int init()
name[strlen(name)-1] = '0'+i;
status = pj_stun_sock_create(&g.stun_config, name, pj_AF_INET(),
- &stun_sock_cb, &ss_cfg,
+ PJ_STUN_TP_UDP, &stun_sock_cb, &ss_cfg,
&g.peer[i], &g.peer[i].stun_sock);
if (status != PJ_SUCCESS) {
my_perror("pj_stun_sock_create()", status);
diff --git a/pjnath/src/pjturn-srv/allocation.c b/pjnath/src/pjturn-srv/allocation.c
index 6c9c9ce11..99d1545c7 100644
--- a/pjnath/src/pjturn-srv/allocation.c
+++ b/pjnath/src/pjturn-srv/allocation.c
@@ -338,7 +338,8 @@ PJ_DEF(pj_status_t) pj_turn_allocation_create(pj_turn_transport *transport,
sess_cb.on_rx_request = &stun_on_rx_request;
sess_cb.on_rx_indication = &stun_on_rx_indication;
status = pj_stun_session_create(&srv->core.stun_cfg, alloc->obj_name,
- &sess_cb, PJ_FALSE, NULL, &alloc->sess);
+ &sess_cb, PJ_FALSE, NULL, &alloc->sess,
+ PJ_STUN_TP_UDP);
if (status != PJ_SUCCESS) {
goto on_error;
}
diff --git a/pjnath/src/pjturn-srv/server.c b/pjnath/src/pjturn-srv/server.c
index 94dda29a3..95ad1793d 100644
--- a/pjnath/src/pjturn-srv/server.c
+++ b/pjnath/src/pjturn-srv/server.c
@@ -156,7 +156,7 @@ PJ_DEF(pj_status_t) pj_turn_srv_create(pj_pool_factory *pf,
status = pj_stun_session_create(&srv->core.stun_cfg, srv->obj_name,
&sess_cb, PJ_FALSE, NULL,
- &srv->core.stun_sess);
+ &srv->core.stun_sess, PJ_STUN_TP_UDP);
if (status != PJ_SUCCESS) {
goto on_error;
}
diff --git a/pjsip-apps/src/samples/icedemo.c b/pjsip-apps/src/samples/icedemo.c
index 07ccc31f0..3b93b9417 100644
--- a/pjsip-apps/src/samples/icedemo.c
+++ b/pjsip-apps/src/samples/icedemo.c
@@ -44,6 +44,7 @@ static struct app_t
pj_str_t stun_srv;
pj_str_t turn_srv;
pj_bool_t turn_tcp;
+ pj_bool_t ice_tcp;
pj_str_t turn_username;
pj_str_t turn_password;
pj_bool_t turn_fingerprint;
@@ -341,6 +342,12 @@ static pj_status_t icedemo_init(void)
else
icedemo.ice_cfg.opt.aggressive = PJ_TRUE;
+ /* Connection type to STUN server */
+ if (icedemo.opt.ice_tcp)
+ icedemo.ice_cfg.stun.conn_type = PJ_STUN_TP_TCP;
+ else
+ icedemo.ice_cfg.stun.conn_type = PJ_STUN_TP_UDP;
+
/* Configure STUN/srflx candidate resolution */
if (icedemo.opt.stun_srv.slen) {
char *pos;
@@ -384,7 +391,7 @@ static pj_status_t icedemo_init(void)
icedemo.ice_cfg.turn.auth_cred.data.static_cred.data = icedemo.opt.turn_password;
/* Connection type to TURN server */
- if (icedemo.opt.turn_tcp)
+ if (icedemo.opt.ice_tcp)
icedemo.ice_cfg.turn.conn_type = PJ_TURN_TP_TCP;
else
icedemo.ice_cfg.turn.conn_type = PJ_TURN_TP_UDP;
@@ -395,6 +402,10 @@ static pj_status_t icedemo_init(void)
icedemo.ice_cfg.turn.alloc_param.ka_interval = KA_INTERVAL;
}
+ if (icedemo.opt.ice_tcp) {
+ icedemo.ice_cfg.protocol = PJ_ICE_TP_TCP;
+ }
+
/* -= That's it for now, initialization is complete =- */
return PJ_SUCCESS;
}
@@ -530,10 +541,27 @@ static int print_cand(char buffer[], unsigned maxlen,
char *p = buffer;
int printed;
- PRINT("a=candidate:%.*s %u UDP %u %s %u typ ",
+ /** Section 4.5, RFC 6544 (https://tools.ietf.org/html/rfc6544)
+ * candidate-attribute = "candidate" ":" foundation SP component-id SP
+ * "TCP" SP
+ * priority SP
+ * connection-address SP
+ * port SP
+ * cand-type
+ * [SP rel-addr]
+ * [SP rel-port]
+ * SP tcp-type-ext
+ * *(SP extension-att-name SP
+ * extension-att-value)
+ *
+ * tcp-type-ext = "tcptype" SP tcp-type
+ * tcp-type = "active" / "passive" / "so"
+ */
+ PRINT("a=candidate:%.*s %u %s %u %s %u typ ",
(int)cand->foundation.slen,
cand->foundation.ptr,
(unsigned)cand->comp_id,
+ cand->transport == PJ_CAND_UDP? "UDP" : "TCP",
cand->prio,
pj_sockaddr_print(&cand->addr, ipaddr,
sizeof(ipaddr), 0),
@@ -542,6 +570,23 @@ static int print_cand(char buffer[], unsigned maxlen,
PRINT("%s\n",
pj_ice_get_cand_type_name(cand->type));
+ if (cand->transport != PJ_CAND_UDP) {
+ PRINT(" tcptype");
+ switch (cand->transport) {
+ case PJ_CAND_TCP_ACTIVE:
+ PRINT(" active");
+ break;
+ case PJ_CAND_TCP_PASSIVE:
+ PRINT(" passive");
+ break;
+ case PJ_CAND_TCP_SO:
+ default:
+ PRINT(" so");
+ break;
+ }
+ }
+ PRINT("\n");
+
if (p == buffer+maxlen)
return -PJ_ETOOSMALL;
@@ -608,6 +653,26 @@ static int encode_session(char buffer[], unsigned maxlen)
sizeof(ipaddr), 0));
}
+ if (cand[0].transport != PJ_CAND_UDP) {
+ /** RFC 6544, Section 4.5:
+ * If the default candidate is TCP-based, the agent MUST include the
+ * a=setup and a=connection attributes from RFC 4145 [RFC4145],
+ * following the procedures defined there as if ICE were not in use.
+ */
+ PRINT("a=setup:");
+ switch (cand[0].transport) {
+ case PJ_CAND_TCP_ACTIVE:
+ PRINT("active\n");
+ break;
+ case PJ_CAND_TCP_PASSIVE:
+ PRINT("passive\n");
+ break;
+ default:
+ return -PJ_EINVALIDOP;
+ }
+ PRINT("a=connection:new\n");
+ }
+
/* Enumerate all candidates for this component */
cand_cnt = PJ_ARRAY_SIZE(cand);
status = pj_ice_strans_enum_cands(icedemo.icest, comp+1,
@@ -709,7 +774,7 @@ static void icedemo_show_ice(void)
*/
static void icedemo_input_remote(void)
{
- char linebuf[80];
+ char linebuf[120];
unsigned media_cnt = 0;
unsigned comp0_port = 0;
char comp0_addr[80];
@@ -819,27 +884,43 @@ static void icedemo_input_remote(void)
pj_sockaddr_set_port(&icedemo.rem.def_addr[1], (pj_uint16_t)port);
} else if (strcmp(attr, "candidate")==0) {
+ /** Section 4.5, RFC 6544 (https://tools.ietf.org/html/rfc6544)
+ * candidate-attribute = "candidate" ":" foundation SP component-id
+ * SP "TCP" SP priority SP connection-address SP port SP cand-type [SP
+ * rel-addr] [SP rel-port] SP tcp-type-ext
+ * *(SP extension-att-name SP
+ * extension-att-value)
+ *
+ * tcp-type-ext = "tcptype" SP tcp-type
+ * tcp-type = "active" / "passive" / "so"
+ */
char *sdpcand = attr+strlen(attr)+1;
int af, cnt;
- char foundation[32], transport[12], ipaddr[80], type[32];
+ char foundation[32], transport[12], ipaddr[80], type[32], tcp_type[32];
pj_str_t tmpaddr;
int comp_id, prio, port;
pj_ice_sess_cand *cand;
pj_status_t status;
+ pj_bool_t is_tcp = PJ_FALSE;
- cnt = sscanf(sdpcand, "%s %d %s %d %s %d typ %s",
+ cnt = sscanf(sdpcand, "%31s %d %11s %d %79s %d typ %31s tcptype %31s",
foundation,
&comp_id,
transport,
&prio,
ipaddr,
&port,
- type);
- if (cnt != 7) {
+ type,
+ tcp_type);
+ if (cnt != 7 && cnt != 8) {
PJ_LOG(1, (THIS_FILE, "error: Invalid ICE candidate line"));
goto on_error;
}
+ if (strcmp(transport, "TCP") == 0) {
+ /* tcp_type[] is only written when the tcptype token was matched. */
+ if (cnt != 8) {
+ PJ_LOG(1, (THIS_FILE, "error: TCP candidate without tcptype"));
+ goto on_error;
+ }
+ is_tcp = PJ_TRUE;
+ }
+
cand = &icedemo.rem.cand[icedemo.rem.cand_cnt];
pj_bzero(cand, sizeof(*cand));
@@ -855,6 +936,23 @@ static void icedemo_input_remote(void)
goto on_error;
}
+ if (is_tcp) {
+ if (strcmp(tcp_type, "active") == 0)
+ cand->transport = PJ_CAND_TCP_ACTIVE;
+ else if (strcmp(tcp_type, "passive") == 0)
+ cand->transport = PJ_CAND_TCP_PASSIVE;
+ else if (strcmp(tcp_type, "so") == 0)
+ cand->transport = PJ_CAND_TCP_SO;
+ else {
+ PJ_LOG(1, (THIS_FILE,
+ "Error: invalid transport type '%s'",
+ tcp_type));
+ goto on_error;
+ }
+ } else {
+ cand->transport = PJ_CAND_UDP;
+ }
+
cand->comp_id = (pj_uint8_t)comp_id;
pj_strdup2(icedemo.pool, &cand->foundation, foundation);
cand->prio = prio;
@@ -879,6 +977,10 @@ static void icedemo_input_remote(void)
if (cand->comp_id > icedemo.rem.comp_cnt)
icedemo.rem.comp_cnt = cand->comp_id;
+ } else if (strcmp(attr, "setup") == 0) {
+ // TODO
+ } else if (strcmp(attr, "connection") == 0) {
+ // TODO
}
}
break;
diff --git a/pjsip/src/pjsua-lib/pjsua_core.c b/pjsip/src/pjsua-lib/pjsua_core.c
index 474a8d07c..9257f07a4 100644
--- a/pjsip/src/pjsua-lib/pjsua_core.c
+++ b/pjsip/src/pjsua-lib/pjsua_core.c
@@ -1548,7 +1548,7 @@ static void resolve_stun_entry(pjsua_stun_resolve *sess)
stun_sock_cb.on_status = &test_stun_on_status;
sess->async_wait = PJ_FALSE;
status = pj_stun_sock_create(&pjsua_var.stun_cfg, "stunresolve",
- sess->af, &stun_sock_cb,
+ sess->af, PJ_STUN_TP_UDP, &stun_sock_cb,
NULL, sess, &sess->stun_sock);
if (status != PJ_SUCCESS) {
char errmsg[PJ_ERR_MSG_SIZE];
--
2.26.2