
socket: Rename sb(un)lock() and interlock with listen(2)

In preparation for moving sockbuf locks into the containing socket,
provide alternative macros for the sockbuf I/O locks:
SOCK_IO_SEND_(UN)LOCK() and SOCK_IO_RECV_(UN)LOCK().  These operate on a
socket rather than a socket buffer.  Note that these locks are used only
to prevent concurrent readers and writers from interleaving I/O.

When locking for I/O, return an error if the socket is a listening
socket.  Currently the check is racy since the sockbuf sx locks are
destroyed during the transition to a listening socket, but that will no
longer be true after some follow-up changes.

Modify a few places to check for errors from
sblock()/SOCK_IO_(SEND|RECV)_LOCK() where they were not before.  In
particular, add checks to sendfile() and sorflush().

Reviewed by:	tuexen, gallatin
MFC after:	1 month
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D31657
Mark Johnston 2021-09-07 14:49:31 -04:00
parent 97cf43ebc5
commit f94acf52a4
10 changed files with 103 additions and 93 deletions
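For orientation, here is a minimal caller-side sketch of the locking pattern these macros establish. The routine example_soreceive() and its body are hypothetical; the macros, the SBL_*/SBLOCKWAIT() flags, and the ENOTCONN failure for listening sockets are taken from the diff below.

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/uio.h>

static int
example_soreceive(struct socket *so, struct uio *uio, int flags)
{
	int error;

	/*
	 * Serialize against other readers.  After this change the lock
	 * attempt fails with ENOTCONN if the socket is (or has become)
	 * a listening socket.
	 */
	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
	if (error != 0)
		return (error);

	/* ... copy data out under SOCKBUF_LOCK() as the protocol requires ... */

	SOCK_IO_RECV_UNLOCK(so);
	return (error);
}

Compare hvs_trans_soreceive() and vn_sendfile() in the hunks below, which now propagate an error from the lock attempt where they previously ignored it.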

View File

@ -2203,14 +2203,14 @@ t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
/* Inline sosend_generic(). */
error = sblock(sb, SBL_WAIT);
error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
MPASS(error == 0);
sendanother:
SOCKBUF_LOCK(sb);
if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
if ((so->so_options & SO_NOSIGPIPE) == 0) {
PROC_LOCK(job->userproc);
kern_psignal(job->userproc, SIGPIPE);
@ -2223,12 +2223,12 @@ t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
error = so->so_error;
so->so_error = 0;
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
goto out;
}
if ((so->so_state & SS_ISCONNECTED) == 0) {
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
error = ENOTCONN;
goto out;
}
@ -2241,13 +2241,13 @@ t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
*/
if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
error = ECANCELED;
goto out;
}
TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
goto out;
}
@ -2274,7 +2274,7 @@ t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
m = alloc_aiotx_mbuf(job, len);
if (m == NULL) {
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
error = EFAULT;
goto out;
}
@ -2285,7 +2285,7 @@ t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
INP_WLOCK(inp);
if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
INP_WUNLOCK(inp);
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
error = ECONNRESET;
goto out;
}
@ -2307,7 +2307,7 @@ t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
INP_WUNLOCK(inp);
if (sendmore)
goto sendanother;
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
if (error)
goto out;

View File

@ -664,18 +664,17 @@ hvs_trans_soreceive(struct socket *so, struct sockaddr **paddr,
if (uio->uio_resid == 0 || uio->uio_rw != UIO_READ)
return (EINVAL);
sb = &so->so_rcv;
orig_resid = uio->uio_resid;
/* Prevent other readers from entering the socket. */
error = sblock(sb, SBLOCKWAIT(flags));
error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
if (error) {
HVSOCK_DBG(HVSOCK_DBG_ERR,
"%s: sblock returned error = %d\n", __func__, error);
"%s: soiolock returned error = %d\n", __func__, error);
return (error);
}
sb = &so->so_rcv;
SOCKBUF_LOCK(sb);
cbarg.uio = uio;
@ -779,8 +778,7 @@ hvs_trans_soreceive(struct socket *so, struct sockaddr **paddr,
out:
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_RECV_UNLOCK(so);
/* We recieved a FIN in this call */
if (so->so_error == ESHUTDOWN) {
@ -823,18 +821,17 @@ hvs_trans_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
if (uio->uio_resid == 0 || uio->uio_rw != UIO_WRITE)
return (EINVAL);
sb = &so->so_snd;
orig_resid = uio->uio_resid;
/* Prevent other writers from entering the socket. */
error = sblock(sb, SBLOCKWAIT(flags));
error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
if (error) {
HVSOCK_DBG(HVSOCK_DBG_ERR,
"%s: sblock returned error = %d\n", __func__, error);
"%s: soiolocak returned error = %d\n", __func__, error);
return (error);
}
sb = &so->so_snd;
SOCKBUF_LOCK(sb);
if ((sb->sb_state & SBS_CANTSENDMORE) ||
@ -893,7 +890,7 @@ hvs_trans_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
out:
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_SEND_UNLOCK(so);
return (error);
}
@ -1674,7 +1671,7 @@ hvsock_detach(device_t dev)
{
struct hvsock_sc *sc = (struct hvsock_sc *)device_get_softc(dev);
struct socket *so;
int error, retry;
int retry;
if (bootverbose)
device_printf(dev, "hvsock_detach called.\n");
@ -1703,8 +1700,7 @@ hvsock_detach(device_t dev)
*/
if (so) {
retry = 0;
while ((error = sblock(&so->so_rcv, 0)) ==
EWOULDBLOCK) {
while (SOCK_IO_RECV_LOCK(so, 0) == EWOULDBLOCK) {
/*
* Someone is reading, rx br is busy
*/
@ -1715,8 +1711,7 @@ hvsock_detach(device_t dev)
"retry = %d\n", retry++);
}
retry = 0;
while ((error = sblock(&so->so_snd, 0)) ==
EWOULDBLOCK) {
while (SOCK_IO_SEND_LOCK(so, 0) == EWOULDBLOCK) {
/*
* Someone is sending, tx br is busy
*/
@ -1734,8 +1729,8 @@ hvsock_detach(device_t dev)
sc->pcb = NULL;
if (so) {
sbunlock(&so->so_rcv);
sbunlock(&so->so_snd);
SOCK_IO_RECV_UNLOCK(so);
SOCK_IO_SEND_UNLOCK(so);
so->so_pcb = NULL;
}

View File

@ -741,7 +741,9 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
* XXXRW: Historically this has assumed non-interruptibility, so now
* we implement that, but possibly shouldn't.
*/
(void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
error = SOCK_IO_SEND_LOCK(so, SBL_WAIT | SBL_NOINTR);
if (error != 0)
goto out;
#ifdef KERN_TLS
tls = ktls_hold(so->so_snd.sb_tls_info);
#endif
@ -1211,7 +1213,7 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
* Send trailers. Wimp out and use writev(2).
*/
if (trl_uio != NULL) {
sbunlock(&so->so_snd);
SOCK_IO_SEND_UNLOCK(so);
error = kern_writev(td, sockfd, trl_uio);
if (error == 0)
sbytes += td->td_retval[0];
@ -1219,7 +1221,7 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
}
done:
sbunlock(&so->so_snd);
SOCK_IO_SEND_UNLOCK(so);
out:
/*
* If there was no error we have to clear td->td_retval[0]

View File

@ -1171,7 +1171,7 @@ ktls_enable_tx(struct socket *so, struct tls_enable *en)
return (error);
}
error = sblock(&so->so_snd, SBL_WAIT);
error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
if (error) {
ktls_cleanup(tls);
return (error);
@ -1191,7 +1191,7 @@ ktls_enable_tx(struct socket *so, struct tls_enable *en)
so->so_snd.sb_flags |= SB_TLS_IFNET;
SOCKBUF_UNLOCK(&so->so_snd);
INP_WUNLOCK(inp);
sbunlock(&so->so_snd);
SOCK_IO_SEND_UNLOCK(so);
counter_u64_add(ktls_offload_total, 1);
@ -1292,7 +1292,7 @@ ktls_set_tx_mode(struct socket *so, int mode)
return (error);
}
error = sblock(&so->so_snd, SBL_WAIT);
error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
if (error) {
counter_u64_add(ktls_switch_failed, 1);
ktls_free(tls_new);
@ -1307,7 +1307,7 @@ ktls_set_tx_mode(struct socket *so, int mode)
*/
if (tls != so->so_snd.sb_tls_info) {
counter_u64_add(ktls_switch_failed, 1);
sbunlock(&so->so_snd);
SOCK_IO_SEND_UNLOCK(so);
ktls_free(tls_new);
ktls_free(tls);
INP_WLOCK(inp);
@ -1319,7 +1319,7 @@ ktls_set_tx_mode(struct socket *so, int mode)
if (tls_new->mode != TCP_TLS_MODE_SW)
so->so_snd.sb_flags |= SB_TLS_IFNET;
SOCKBUF_UNLOCK(&so->so_snd);
sbunlock(&so->so_snd);
SOCK_IO_SEND_UNLOCK(so);
/*
* Drop two references on 'tls'. The first is for the

View File

@ -475,34 +475,6 @@ sbwait(struct sockbuf *sb)
sb->sb_timeo, 0, 0));
}
int
sblock(struct sockbuf *sb, int flags)
{
KASSERT((flags & SBL_VALID) == flags,
("sblock: flags invalid (0x%x)", flags));
if (flags & SBL_WAIT) {
if ((sb->sb_flags & SB_NOINTR) ||
(flags & SBL_NOINTR)) {
sx_xlock(&sb->sb_sx);
return (0);
}
return (sx_xlock_sig(&sb->sb_sx));
} else {
if (sx_try_xlock(&sb->sb_sx) == 0)
return (EWOULDBLOCK);
return (0);
}
}
void
sbunlock(struct sockbuf *sb)
{
sx_xunlock(&sb->sb_sx);
}
/*
* Wakeup processes waiting on a socket buffer. Do asynchronous notification
* via SIGIO if the socket has the SS_ASYNC flag set.

View File

@ -1587,7 +1587,7 @@ sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
if (control != NULL)
clen = control->m_len;
error = sblock(&so->so_snd, SBLOCKWAIT(flags));
error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
if (error)
goto out;
@ -1785,7 +1785,7 @@ sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
} while (resid);
release:
sbunlock(&so->so_snd);
SOCK_IO_SEND_UNLOCK(so);
out:
#ifdef KERN_TLS
if (tls != NULL)
@ -1932,7 +1932,7 @@ soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
(*pr->pr_usrreqs->pru_rcvd)(so, 0);
}
error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
if (error)
return (error);
@ -2387,7 +2387,7 @@ soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
if (flagsp != NULL)
*flagsp |= flags;
release:
sbunlock(&so->so_rcv);
SOCK_IO_RECV_UNLOCK(so);
return (error);
}
@ -2434,7 +2434,7 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
#endif
/* Prevent other readers from entering the socket. */
error = sblock(sb, SBLOCKWAIT(flags));
error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
if (error)
return (error);
SOCKBUF_LOCK(sb);
@ -2442,7 +2442,7 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
#ifdef KERN_TLS
if (sb->sb_tls_info != NULL) {
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_RECV_UNLOCK(so);
return (soreceive_generic(so, psa, uio, mp0, controlp,
flagsp));
}
@ -2605,11 +2605,10 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
goto restart;
out:
SOCKBUF_LOCK_ASSERT(sb);
SBLASTRECORDCHK(sb);
SBLASTMBUFCHK(sb);
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_RECV_UNLOCK(so);
return (error);
}
@ -2876,6 +2875,7 @@ sorflush(struct socket *so)
struct sockbuf *sb = &so->so_rcv;
struct protosw *pr = so->so_proto;
struct socket aso;
int error;
VNET_SO_ASSERT(so);
@ -2893,7 +2893,9 @@ sorflush(struct socket *so)
* despite any existing socket disposition on interruptable waiting.
*/
socantrcvmore(so);
(void) sblock(sb, SBL_WAIT | SBL_NOINTR);
error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
KASSERT(error == 0, ("%s: cannot lock sock %p recv buffer",
__func__, so));
/*
* Invalidate/clear most of the sockbuf structure, but leave selinfo
@ -2907,7 +2909,7 @@ sorflush(struct socket *so)
bzero(&sb->sb_startzero,
sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_RECV_UNLOCK(so);
/*
* Dispose of special rights and flush the copied socket. Don't call
@ -4100,6 +4102,39 @@ soisdisconnected(struct socket *so)
wakeup(&so->so_timeo);
}
int
soiolock(struct socket *so, struct sx *sx, int flags)
{
int error;
KASSERT((flags & SBL_VALID) == flags,
("soiolock: invalid flags %#x", flags));
if ((flags & SBL_WAIT) != 0) {
if ((flags & SBL_NOINTR) != 0) {
sx_xlock(sx);
} else {
error = sx_xlock_sig(sx);
if (error != 0)
return (error);
}
} else if (!sx_try_xlock(sx)) {
return (EWOULDBLOCK);
}
if (__predict_false(SOLISTENING(so))) {
sx_xunlock(sx);
return (ENOTCONN);
}
return (0);
}
void
soiounlock(struct sx *sx)
{
sx_xunlock(sx);
}
/*
* Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
*/

View File

@ -4796,10 +4796,10 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
old_so = old_inp->sctp_socket;
new_so = new_inp->sctp_socket;
TAILQ_INIT(&tmp_queue);
error = sblock(&old_so->so_rcv, waitflags);
error = SOCK_IO_RECV_LOCK(old_so, waitflags);
if (error) {
/*
* Gak, can't get sblock, we have a problem. data will be
* Gak, can't get I/O lock, we have a problem. data will be
* left stranded.. and we don't dare look at it since the
* other thread may be reading something. Oh well, its a
* screwed up app that does a peeloff OR a accept while
@ -4831,9 +4831,8 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
}
}
SCTP_INP_READ_UNLOCK(old_inp);
/* Remove the sb-lock on the old socket */
sbunlock(&old_so->so_rcv);
/* Remove the recv-lock on the old socket */
SOCK_IO_RECV_UNLOCK(old_so);
/* Now we move them over to the new socket buffer */
SCTP_INP_READ_LOCK(new_inp);
TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
@ -5586,7 +5585,7 @@ sctp_sorecvmsg(struct socket *so,
rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
}
error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
error = SOCK_IO_RECV_LOCK(so, (block_allowed ? SBL_WAIT : 0));
if (error) {
goto release_unlocked;
}
@ -6234,8 +6233,8 @@ sctp_sorecvmsg(struct socket *so,
}
/*
* We need to wait for more data a few things: - We don't
* sbunlock() so we don't get someone else reading. - We
* must be sure to account for the case where what is added
* release the I/O lock so we don't get someone else reading.
* - We must be sure to account for the case where what is added
* is NOT to our control when we wakeup.
*/
@ -6383,7 +6382,7 @@ sctp_sorecvmsg(struct socket *so,
hold_sblock = 0;
}
sbunlock(&so->so_rcv);
SOCK_IO_RECV_UNLOCK(so);
sockbuf_lock = 0;
release_unlocked:
@ -6418,7 +6417,7 @@ sctp_sorecvmsg(struct socket *so,
SOCKBUF_UNLOCK(&so->so_rcv);
}
if (sockbuf_lock) {
sbunlock(&so->so_rcv);
SOCK_IO_RECV_UNLOCK(so);
}
if (freecnt_applied) {

View File

@ -1105,7 +1105,7 @@ sdp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
td->td_ru.ru_msgsnd++;
ssk = sdp_sk(so);
error = sblock(&so->so_snd, SBLOCKWAIT(flags));
error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
if (error)
goto out;
@ -1196,7 +1196,7 @@ sdp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
} while (resid);
release:
sbunlock(&so->so_snd);
SOCK_IO_SEND_UNLOCK(so);
out:
if (top != NULL)
m_freem(top);
@ -1267,9 +1267,9 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
ssk = sdp_sk(so);
/* Prevent other readers from entering the socket. */
error = sblock(sb, SBLOCKWAIT(flags));
error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
if (error)
goto out;
return (error);
SOCKBUF_LOCK(sb);
/* Easy one, no space to copyout anything. */
@ -1423,11 +1423,10 @@ sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio,
if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
goto restart;
out:
SOCKBUF_LOCK_ASSERT(sb);
SBLASTRECORDCHK(sb);
SBLASTMBUFCHK(sb);
SOCKBUF_UNLOCK(sb);
sbunlock(sb);
SOCK_IO_RECV_UNLOCK(so);
return (error);
}

View File

@ -78,7 +78,6 @@ struct selinfo;
*
* Locking key to struct sockbuf:
* (a) locked by SOCKBUF_LOCK().
* (b) locked by sblock()
*/
struct sockbuf {
struct mtx sb_mtx; /* sockbuf lock */
@ -183,8 +182,6 @@ struct mbuf *
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff);
int sbwait(struct sockbuf *sb);
int sblock(struct sockbuf *sb, int flags);
void sbunlock(struct sockbuf *sb);
void sballoc(struct sockbuf *, struct mbuf *);
void sbfree(struct sockbuf *, struct mbuf *);
void sballoc_ktls_rx(struct sockbuf *sb, struct mbuf *m);

View File

@ -249,12 +249,21 @@ struct socket {
*/
/*
* Flags to sblock().
* Flags to soiolock().
*/
#define SBL_WAIT 0x00000001 /* Wait if not immediately available. */
#define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */
#define SBL_VALID (SBL_WAIT | SBL_NOINTR)
#define SOCK_IO_SEND_LOCK(so, flags) \
soiolock((so), &(so)->so_snd.sb_sx, (flags))
#define SOCK_IO_SEND_UNLOCK(so) \
soiounlock(&(so)->so_snd.sb_sx)
#define SOCK_IO_RECV_LOCK(so, flags) \
soiolock((so), &(so)->so_rcv.sb_sx, (flags))
#define SOCK_IO_RECV_UNLOCK(so) \
soiounlock(&(so)->so_rcv.sb_sx)
/*
* Do we need to notify the other side when I/O is possible?
*/
@ -484,6 +493,8 @@ void socantsendmore(struct socket *so);
void socantsendmore_locked(struct socket *so);
void soroverflow(struct socket *so);
void soroverflow_locked(struct socket *so);
int soiolock(struct socket *so, struct sx *sx, int flags);
void soiounlock(struct sx *sx);
/*
* Accept filter functions (duh).