1
0
mirror of https://git.FreeBSD.org/src.git synced 2025-01-17 15:27:36 +00:00

Move backend-specific fields of kaiocb into a union.

This reduces the size of kaiocb slightly. I've also added some generic
fields that other backends can use in place of the BIO-specific fields.

Change the socket and Chelsio DDP backends to use 'backend3' instead of
abusing _aiocb_private.status directly. This confines the use of
_aiocb_private to the AIO internals in vfs_aio.c.

Reviewed by:	kib (earlier version)
Approved by:	re (gjb)
Sponsored by:	Chelsio Communications
Differential Revision:	https://reviews.freebsd.org/D6547
This commit is contained in:
John Baldwin 2016-06-15 20:56:45 +00:00
parent 9fdbfd3b6c
commit fe0bdd1d2c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=301930
4 changed files with 51 additions and 30 deletions

View File

@@ -74,6 +74,12 @@ VNET_DECLARE(int, tcp_autorcvbuf_inc);
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
/*
* Use the 'backend3' field in AIO jobs to store the amount of data
* received by the AIO job so far.
*/
#define aio_received backend3
static void aio_ddp_requeue_task(void *context, int pending);
static void ddp_complete_all(struct toepcb *toep, int error);
static void t4_aio_cancel_active(struct kaiocb *job);
@@ -204,7 +210,7 @@ ddp_complete_one(struct kaiocb *job, int error)
* it was cancelled, report it as a short read rather than an
* error.
*/
copied = job->uaiocb._aiocb_private.status;
copied = job->aio_received;
if (copied != 0 || error == 0)
aio_complete(job, copied, 0);
else
@@ -350,7 +356,7 @@ insert_ddp_data(struct toepcb *toep, uint32_t n)
MPASS((toep->ddp_flags & db_flag) != 0);
db = &toep->db[db_idx];
job = db->job;
copied = job->uaiocb._aiocb_private.status;
copied = job->aio_received;
placed = n;
if (placed > job->uaiocb.aio_nbytes - copied)
placed = job->uaiocb.aio_nbytes - copied;
@@ -360,7 +366,7 @@ insert_ddp_data(struct toepcb *toep, uint32_t n)
* t4_aio_cancel_active() completes this
* request.
*/
job->uaiocb._aiocb_private.status += placed;
job->aio_received += placed;
} else if (copied + placed != 0) {
CTR4(KTR_CXGBE,
"%s: completing %p (copied %ld, placed %lu)",
@@ -601,16 +607,16 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
* Update the job's length but defer completion to the
* TCB_RPL callback.
*/
job->uaiocb._aiocb_private.status += len;
job->aio_received += len;
goto out;
} else if (!aio_clear_cancel_function(job)) {
/*
* Update the copied length for when
* t4_aio_cancel_active() completes this request.
*/
job->uaiocb._aiocb_private.status += len;
job->aio_received += len;
} else {
copied = job->uaiocb._aiocb_private.status;
copied = job->aio_received;
#ifdef VERBOSE_TRACES
CTR4(KTR_CXGBE, "%s: completing %p (copied %ld, placed %d)",
__func__, job, copied, len);
@@ -698,7 +704,7 @@ handle_ddp_tcb_rpl(struct toepcb *toep, const struct cpl_set_tcb_rpl *cpl)
* also take care of updating the tp, etc.
*/
job = db->job;
copied = job->uaiocb._aiocb_private.status;
copied = job->aio_received;
if (copied == 0) {
CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
aio_cancel(job);
@@ -746,7 +752,7 @@ handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
MPASS((toep->ddp_flags & db_flag) != 0);
db = &toep->db[db_idx];
job = db->job;
copied = job->uaiocb._aiocb_private.status;
copied = job->aio_received;
placed = len;
if (placed > job->uaiocb.aio_nbytes - copied)
placed = job->uaiocb.aio_nbytes - copied;
@@ -756,7 +762,7 @@ handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
* t4_aio_cancel_active() completes this
* request.
*/
job->uaiocb._aiocb_private.status += placed;
job->aio_received += placed;
} else {
CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d",
__func__, toep->tid, db_idx, placed);
@@ -1212,7 +1218,7 @@ aio_ddp_cancel_one(struct kaiocb *job)
* it was cancelled, report it as a short read rather than an
* error.
*/
copied = job->uaiocb._aiocb_private.status;
copied = job->aio_received;
if (copied != 0)
aio_complete(job, copied, 0);
else
@@ -1297,7 +1303,7 @@ aio_ddp_requeue(struct toepcb *toep)
* a short read and leave the error to be reported by
* a future request.
*/
copied = job->uaiocb._aiocb_private.status;
copied = job->aio_received;
if (copied != 0) {
SOCKBUF_UNLOCK(sb);
aio_complete(job, copied, 0);
@@ -1371,7 +1377,7 @@ aio_ddp_requeue(struct toepcb *toep)
SOCKBUF_LOCK(sb);
if (so->so_error && sbavail(sb) == 0) {
copied = job->uaiocb._aiocb_private.status;
copied = job->aio_received;
if (copied != 0) {
SOCKBUF_UNLOCK(sb);
recycle_pageset(toep, ps);
@@ -1421,9 +1427,9 @@ aio_ddp_requeue(struct toepcb *toep)
* copy those mbufs out directly.
*/
copied = 0;
offset = ps->offset + job->uaiocb._aiocb_private.status;
MPASS(job->uaiocb._aiocb_private.status <= job->uaiocb.aio_nbytes);
resid = job->uaiocb.aio_nbytes - job->uaiocb._aiocb_private.status;
offset = ps->offset + job->aio_received;
MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
resid = job->uaiocb.aio_nbytes - job->aio_received;
m = sb->sb_mb;
KASSERT(m == NULL || toep->ddp_active_count == 0,
("%s: sockbuf data with active DDP", __func__));
@@ -1451,8 +1457,8 @@ aio_ddp_requeue(struct toepcb *toep)
}
if (copied != 0) {
sbdrop_locked(sb, copied);
job->uaiocb._aiocb_private.status += copied;
copied = job->uaiocb._aiocb_private.status;
job->aio_received += copied;
copied = job->aio_received;
inp = sotoinpcb(so);
if (!INP_TRY_WLOCK(inp)) {
/*
@@ -1568,8 +1574,8 @@ aio_ddp_requeue(struct toepcb *toep)
* which will keep it open and keep the TCP PCB attached until
* after the job is completed.
*/
wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps,
job->uaiocb._aiocb_private.status, ddp_flags, ddp_flags_mask);
wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps, job->aio_received,
ddp_flags, ddp_flags_mask);
if (wr == NULL) {
recycle_pageset(toep, ps);
aio_ddp_requeue_one(toep, job);
@@ -1727,7 +1733,6 @@ t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
panic("new job was cancelled");
TAILQ_INSERT_TAIL(&toep->ddp_aiojobq, job, list);
job->uaiocb._aiocb_private.status = 0;
toep->ddp_waiting_count++;
toep->ddp_flags |= DDP_OK;

View File

@@ -390,6 +390,12 @@ soo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
return (0);
}
/*
* Use the 'backend3' field in AIO jobs to store the amount of data
* completed by the AIO job so far.
*/
#define aio_done backend3
static STAILQ_HEAD(, task) soaio_jobs;
static struct mtx soaio_jobs_lock;
static struct task soaio_kproc_task;
@@ -567,7 +573,7 @@ soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
td_savedcred = td->td_ucred;
td->td_ucred = job->cred;
done = job->uaiocb._aiocb_private.status;
done = job->aio_done;
cnt = job->uaiocb.aio_nbytes - done;
iov.iov_base = (void *)((uintptr_t)job->uaiocb.aio_buf + done);
iov.iov_len = cnt;
@@ -604,7 +610,7 @@ soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
}
done += cnt - uio.uio_resid;
job->uaiocb._aiocb_private.status = done;
job->aio_done = done;
td->td_ucred = td_savedcred;
if (error == EWOULDBLOCK) {
@@ -740,7 +746,7 @@ soo_aio_cancel(struct kaiocb *job)
sb->sb_flags &= ~SB_AIO;
SOCKBUF_UNLOCK(sb);
done = job->uaiocb._aiocb_private.status;
done = job->aio_done;
if (done != 0)
aio_complete(job, done, 0);
else
@@ -774,7 +780,6 @@ soo_aio_queue(struct file *fp, struct kaiocb *job)
if (!aio_set_cancel_function(job, soo_aio_cancel))
panic("new job was cancelled");
TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
job->uaiocb._aiocb_private.status = 0;
if (!(sb->sb_flags & SB_AIO_RUNNING)) {
if (soaio_ready(so, sb))
sowakeup_aio(so, sb);

View File

@@ -520,7 +520,6 @@ aio_free_entry(struct kaiocb *job)
sigqueue_take(&job->ksi);
PROC_UNLOCK(p);
MPASS(job->bp == NULL);
AIO_UNLOCK(ki);
/*

View File

@@ -121,10 +121,6 @@ struct kaiocb {
int jobflags; /* (a) job flags */
int inputcharge; /* (*) input blocks */
int outputcharge; /* (*) output blocks */
struct bio *bp; /* (*) BIO backend BIO pointer */
struct buf *pbuf; /* (*) BIO backend buffer pointer */
struct vm_page *pages[btoc(MAXPHYS)+1]; /* BIO backend pages */
int npages; /* BIO backend number of pages */
struct proc *userproc; /* (*) user process */
struct ucred *cred; /* (*) active credential when created */
struct file *fd_file; /* (*) pointer to file structure */
@@ -134,9 +130,25 @@ struct kaiocb {
struct aiocb uaiocb; /* (*) copy of user I/O control block */
ksiginfo_t ksi; /* (a) realtime signal info */
uint64_t seqno; /* (*) job number */
int pending; /* (a) number of pending I/O, aio_fsync only */
aio_cancel_fn_t *cancel_fn; /* (a) backend cancel function */
aio_handle_fn_t *handle_fn; /* (c) backend handle function */
union { /* Backend-specific data fields */
struct { /* BIO backend */
struct bio *bp; /* (*) BIO pointer */
struct buf *pbuf; /* (*) buffer pointer */
struct vm_page *pages[btoc(MAXPHYS)+1]; /* (*) */
int npages; /* (*) number of pages */
};
struct { /* fsync() requests */
int pending; /* (a) number of pending I/O */
};
struct {
void *backend1;
void *backend2;
long backend3;
int backend4;
};
};
};
struct socket;