From: John Baldwin <jhb@FreeBSD.org>
Date: Fri, 29 Jul 2016 18:26:15 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r303501 - in head: sys/kern tests/sys/aio
Message-Id: <201607291826.u6TIQFlM009123@repo.freebsd.org>

Author: jhb
Date: Fri Jul 29 18:26:15 2016
New Revision: 303501
URL: https://svnweb.freebsd.org/changeset/base/303501

Log:
  Fix locking issues with aio_fsync().
  - Use the correct lock in aio_cancel_sync() when dequeueing the job.
  - Add _locked variants of aio_set/clear_cancel_function() and use them
    to avoid lock recursion when adding fsync jobs to, and removing them
    from, the per-process sync queue.
  - While here, add a basic test for aio_fsync().
  PR:		211390
  Reported by:	Randy Westlund
  MFC after:	1 week
  Sponsored by:	Chelsio Communications
  Differential Revision:	https://reviews.freebsd.org/D7339

Modified:
  head/sys/kern/vfs_aio.c
  head/tests/sys/aio/aio_test.c

Modified: head/sys/kern/vfs_aio.c
==============================================================================
--- head/sys/kern/vfs_aio.c	Fri Jul 29 18:10:59 2016	(r303500)
+++ head/sys/kern/vfs_aio.c	Fri Jul 29 18:26:15 2016	(r303501)
@@ -311,6 +311,7 @@ static void aio_proc_rundown_exec(void *
 static int	aio_qphysio(struct proc *p, struct kaiocb *job);
 static void	aio_daemon(void *param);
 static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job);
+static bool	aio_clear_cancel_function_locked(struct kaiocb *job);
 static int	aio_kick(struct proc *userp);
 static void	aio_kick_nowait(struct proc *userp);
 static void	aio_kick_helper(void *context, int pending);
@@ -919,7 +920,7 @@ notification_done:
 			if (--sjob->pending > 0)
 				continue;
 			TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
-			if (!aio_clear_cancel_function(sjob))
+			if (!aio_clear_cancel_function_locked(sjob))
 				continue;
 			TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list);
 			schedule_fsync = true;
@@ -967,40 +968,57 @@ aio_cancel_cleared(struct kaiocb *job)
 	return ((job->jobflags & KAIOCB_CLEARED) != 0);
 }
 
-bool
-aio_clear_cancel_function(struct kaiocb *job)
+static bool
+aio_clear_cancel_function_locked(struct kaiocb *job)
 {
-	struct kaioinfo *ki;
 
-	ki = job->userproc->p_aioinfo;
-	AIO_LOCK(ki);
+	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
 	MPASS(job->cancel_fn != NULL);
 	if (job->jobflags & KAIOCB_CANCELLING) {
 		job->jobflags |= KAIOCB_CLEARED;
-		AIO_UNLOCK(ki);
 		return (false);
 	}
 	job->cancel_fn = NULL;
-	AIO_UNLOCK(ki);
 	return (true);
 }
 
 bool
-aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
+aio_clear_cancel_function(struct kaiocb *job)
 {
 	struct kaioinfo *ki;
+	bool ret;
 
 	ki = job->userproc->p_aioinfo;
 	AIO_LOCK(ki);
-	if (job->jobflags & KAIOCB_CANCELLED) {
-		AIO_UNLOCK(ki);
+	ret = aio_clear_cancel_function_locked(job);
+	AIO_UNLOCK(ki);
+	return (ret);
+}
+
+static bool
+aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func)
+{
+
+	AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED);
+	if (job->jobflags & KAIOCB_CANCELLED)
 		return (false);
-	}
 	job->cancel_fn = func;
-	AIO_UNLOCK(ki);
 	return (true);
 }
 
+bool
+aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func)
+{
+	struct kaioinfo *ki;
+	bool ret;
+
+	ki = job->userproc->p_aioinfo;
+	AIO_LOCK(ki);
+	ret = aio_set_cancel_function_locked(job, func);
+	AIO_UNLOCK(ki);
+	return (ret);
+}
+
 void
 aio_complete(struct kaiocb *job, long status, int error)
 {
@@ -1655,10 +1673,10 @@ aio_cancel_sync(struct kaiocb *job)
 	struct kaioinfo *ki;
 
 	ki = job->userproc->p_aioinfo;
-	mtx_lock(&aio_job_mtx);
+	AIO_LOCK(ki);
 	if (!aio_cancel_cleared(job))
 		TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
-	mtx_unlock(&aio_job_mtx);
+	AIO_UNLOCK(ki);
 	aio_cancel(job);
 }
 
@@ -1718,7 +1736,8 @@ queueit:
 		}
 	}
 	if (job->pending != 0) {
-		if (!aio_set_cancel_function(job, aio_cancel_sync)) {
+		if (!aio_set_cancel_function_locked(job,
+		    aio_cancel_sync)) {
 			AIO_UNLOCK(ki);
 			aio_cancel(job);
 			return (0);
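[Editor's note: the vfs_aio.c change above uses a common kernel idiom: the
original function keeps its name and its lock/unlock behaviour, while a new
_locked variant asserts (via AIO_LOCK_ASSERT) that the caller already holds
the lock and never acquires it itself, so code paths already running under
AIO_LOCK, such as the fsync-queue handling, can call it without recursing on
a non-recursive mutex. A minimal userspace sketch of the same idiom follows;
the counter structure and pthread mutex are illustrative stand-ins, not code
from the commit.]

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical object protected by a non-recursive mutex. */
struct counter {
	pthread_mutex_t lock;
	int value;
};

/*
 * _locked variant: the caller is required to hold c->lock already,
 * so this function never touches the mutex itself.
 */
bool
counter_bump_locked(struct counter *c, int limit)
{
	if (c->value >= limit)
		return (false);
	c->value++;
	return (true);
}

/*
 * Public variant: acquires the lock, delegates to the _locked worker,
 * and releases the lock.  Callers that already hold c->lock must use
 * counter_bump_locked() instead, to avoid recursing on the mutex.
 */
bool
counter_bump(struct counter *c, int limit)
{
	bool ret;

	pthread_mutex_lock(&c->lock);
	ret = counter_bump_locked(c, limit);
	pthread_mutex_unlock(&c->lock);
	return (ret);
}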
Modified: head/tests/sys/aio/aio_test.c
==============================================================================
--- head/tests/sys/aio/aio_test.c	Fri Jul 29 18:10:59 2016	(r303500)
+++ head/tests/sys/aio/aio_test.c	Fri Jul 29 18:26:15 2016	(r303501)
@@ -924,6 +924,88 @@ ATF_TC_BODY(aio_socket_short_write_cance
 	close(s[0]);
 }
 
+/*
+ * This test just performs a basic test of aio_fsync().
+ */
+ATF_TC_WITHOUT_HEAD(aio_fsync_test);
+ATF_TC_BODY(aio_fsync_test, tc)
+{
+	struct aiocb synccb, *iocbp;
+	struct {
+		struct aiocb iocb;
+		bool done;
+		char *buffer;
+	} buffers[16];
+	struct stat sb;
+	char pathname[PATH_MAX];
+	ssize_t rval;
+	unsigned i;
+	int fd;
+
+	ATF_REQUIRE_KERNEL_MODULE("aio");
+	ATF_REQUIRE_UNSAFE_AIO();
+
+	strcpy(pathname, PATH_TEMPLATE);
+	fd = mkstemp(pathname);
+	ATF_REQUIRE_MSG(fd != -1, "mkstemp failed: %s", strerror(errno));
+	unlink(pathname);
+
+	ATF_REQUIRE(fstat(fd, &sb) == 0);
+	ATF_REQUIRE(sb.st_blksize != 0);
+	ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0);
+
+	/*
+	 * Queue several asynchronous write requests.  Hopefully this
+	 * forces the aio_fsync() request to be deferred.  There is no
+	 * reliable way to guarantee that however.
+	 */
+	srandomdev();
+	for (i = 0; i < nitems(buffers); i++) {
+		buffers[i].done = false;
+		memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb));
+		buffers[i].buffer = malloc(sb.st_blksize);
+		aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random());
+		buffers[i].iocb.aio_fildes = fd;
+		buffers[i].iocb.aio_buf = buffers[i].buffer;
+		buffers[i].iocb.aio_nbytes = sb.st_blksize;
+		buffers[i].iocb.aio_offset = sb.st_blksize * i;
+		ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0);
+	}
+
+	/* Queue the aio_fsync request. */
+	memset(&synccb, 0, sizeof(synccb));
+	synccb.aio_fildes = fd;
+	ATF_REQUIRE(aio_fsync(O_SYNC, &synccb) == 0);
+
+	/* Wait for requests to complete. */
+	for (;;) {
+	next:
+		rval = aio_waitcomplete(&iocbp, NULL);
+		ATF_REQUIRE(iocbp != NULL);
+		if (iocbp == &synccb) {
+			ATF_REQUIRE(rval == 0);
+			break;
+		}
+
+		for (i = 0; i < nitems(buffers); i++) {
+			if (iocbp == &buffers[i].iocb) {
+				ATF_REQUIRE(buffers[i].done == false);
+				ATF_REQUIRE(rval == sb.st_blksize);
+				buffers[i].done = true;
+				goto next;
+			}
+		}
+
+		ATF_REQUIRE_MSG(false, "unmatched AIO request");
+	}
+
+	for (i = 0; i < nitems(buffers); i++)
+		ATF_REQUIRE_MSG(buffers[i].done,
+		    "AIO request %u did not complete", i);
+
+	close(fd);
+}
+
 ATF_TP_ADD_TCS(tp)
 {
 
@@ -937,6 +1019,7 @@ ATF_TP_ADD_TCS(tp)
 	ATF_TP_ADD_TC(tp, aio_socket_two_reads);
 	ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write);
 	ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel);
+	ATF_TP_ADD_TC(tp, aio_fsync_test);
 
 	return (atf_no_error());
 }
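[Editor's note: the new test drives completion through FreeBSD-specific
pieces (aio_waitcomplete(), the aio_fill_buffer() helper, and the ATF
harness). A rough standalone sketch of the same aio_write()-then-aio_fsync()
sequence using only standard POSIX AIO calls is below; the temporary file
path and error handling are illustrative only, and AIO on regular files on
FreeBSD of this era may additionally need the aio kernel module and the
unsafe-AIO setting that the test checks for.]

#include <aio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[4096];
	struct aiocb wcb, scb;
	const struct aiocb *list[1];
	int fd;

	fd = open("/tmp/aio_fsync_demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd == -1)
		err(1, "open");

	/* Queue one asynchronous write. */
	memset(buf, 'x', sizeof(buf));
	memset(&wcb, 0, sizeof(wcb));
	wcb.aio_fildes = fd;
	wcb.aio_buf = buf;
	wcb.aio_nbytes = sizeof(buf);
	wcb.aio_offset = 0;
	if (aio_write(&wcb) != 0)
		err(1, "aio_write");

	/* Queue an fsync that completes after the queued write finishes. */
	memset(&scb, 0, sizeof(scb));
	scb.aio_fildes = fd;
	if (aio_fsync(O_SYNC, &scb) != 0)
		err(1, "aio_fsync");

	/* Reap the write, then wait for the sync request. */
	list[0] = &wcb;
	while (aio_error(&wcb) == EINPROGRESS)
		aio_suspend(list, 1, NULL);
	if (aio_return(&wcb) != (ssize_t)sizeof(buf))
		errx(1, "short asynchronous write");

	list[0] = &scb;
	while (aio_error(&scb) == EINPROGRESS)
		aio_suspend(list, 1, NULL);
	if (aio_return(&scb) != 0)
		errx(1, "aio_fsync failed");

	printf("write and fsync completed\n");
	close(fd);
	return (0);
}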