Blob Blame History Raw
autofs-5.0.3 - fix submount shutdown handling.

From: Ian Kent <raven@themaw.net>

When using submount maps on a busy system autofs can hang.

This problem comes about because of processes walking into the
submount filesystem when it is in the process of shutting down.
While this race has been fixed for other types of mounts it
still isn't possible to block processes from walking into
submounts that are expiring so we need to be able to recover
when this happens.

This patch improves the submount shutdown logic and allows
submounts that become busy during shutdown to recover.
---

 CHANGELOG              |    1 
 daemon/automount.c     |  208 +++++++++++++++++++------------------------
 daemon/direct.c        |   97 ++++++++++++++------
 daemon/indirect.c      |  114 ++++++++++++++++++-----
 daemon/lookup.c        |   11 --
 daemon/state.c         |  235 +++++++++++++++++++++++++++++++++----------------
 include/automount.h    |   17 ---
 include/master.h       |    5 -
 include/state.h        |    9 +
 lib/alarm.c            |   14 --
 lib/master.c           |  182 +++++++------------------------------
 modules/mount_autofs.c |    2 
 12 files changed, 459 insertions(+), 436 deletions(-)


--- autofs-5.0.3.orig/CHANGELOG
+++ autofs-5.0.3/CHANGELOG
@@ -24,6 +24,7 @@
 - add command line option to override check for daemon already running.
 - don't use proc file system when checking if the daemon is running.
 - make handle_mounts startup condition distinct.
+- fix submount shutdown recovery handling.
  
 14/01/2008 autofs-5.0.3
 -----------------------
--- autofs-5.0.3.orig/daemon/automount.c
+++ autofs-5.0.3/daemon/automount.c
@@ -369,6 +369,18 @@ int count_mounts(unsigned logopt, const 
 
 static void check_rm_dirs(struct autofs_point *ap, const char *path, int incl)
 {
+	/*
+	 * If we're a submount the kernel can't know we're trying to
+	 * shutdown and so cannot block processes walking into the
+	 * mount point directory. If this is the call to umount_multi()
+	 * made during shutdown (incl == 0) we have to leave any mount
+	 * point directories in place so we can recover if needed. The
+	 * umount itself will clean these directories up for us
+	 * automagically.
+	 */
+	if (!incl && ap->submount)
+		return;
+
 	if ((!ap->ghost) ||
 	    (ap->state == ST_SHUTDOWN_PENDING ||
 	     ap->state == ST_SHUTDOWN_FORCE ||
@@ -390,8 +402,6 @@ static void update_map_cache(struct auto
 	else
 		key = path;
 
-	pthread_cleanup_push(master_source_lock_cleanup, ap->entry);
-	master_source_readlock(ap->entry);
 	map = ap->entry->maps;
 	while (map) {
 		struct mapent *me = NULL;
@@ -413,7 +423,6 @@ static void update_map_cache(struct auto
 
 		map = map->next;
 	}
-	pthread_cleanup_pop(1);
 
 	return;
 }
@@ -918,38 +927,22 @@ static int get_pkt(struct autofs_point *
 		}
 
 		if (fds[1].revents & POLLIN) {
-			enum states next_state, post_state;
+			enum states next_state;
 			size_t read_size = sizeof(next_state);
 			int state_pipe;
 
-			next_state = post_state = ST_INVAL;
+			next_state = ST_INVAL;
 
-			state_mutex_lock(ap);
+			st_mutex_lock();
 
 			state_pipe = ap->state_pipe[0];
 
 			if (fullread(state_pipe, &next_state, read_size)) {
-				state_mutex_unlock(ap);
+				st_mutex_unlock();
 				continue;
 			}
 
-			if (next_state != ST_INVAL && next_state != ap->state) {
-				if (next_state != ST_SHUTDOWN)
-					post_state = next_state;
-				else
-					ap->state = ST_SHUTDOWN;
-			}
-
-			state_mutex_unlock(ap);
-
-			if (post_state != ST_INVAL) {
-				if (post_state == ST_SHUTDOWN_PENDING ||
-				    post_state == ST_SHUTDOWN_FORCE) {
-					alarm_delete(ap);
-					st_remove_tasks(ap);
-				}
-				st_add_task(ap, post_state);
-			}
+			st_mutex_unlock();
 
 			if (next_state == ST_SHUTDOWN)
 				return -1;
@@ -985,11 +978,14 @@ int do_expire(struct autofs_point *ap, c
 
 	info(ap->logopt, "expiring path %s", buf);
 
+	pthread_cleanup_push(master_source_lock_cleanup, ap->entry);
+	master_source_readlock(ap->entry);
 	ret = umount_multi(ap, buf, 1);
 	if (ret == 0)
 		info(ap->logopt, "expired %s", buf);
 	else
 		warn(ap->logopt, "couldn't complete expire of %s", buf);
+	pthread_cleanup_pop(1);
 
 	return ret;
 }
@@ -1069,7 +1065,7 @@ static int mount_autofs(struct autofs_po
 	if (status < 0)
 		return -1;
 
-	ap->state = ST_READY;
+	st_add_task(ap, ST_READY);
 
 	return 0;
 }
@@ -1423,44 +1419,6 @@ static void return_start_status(void *ar
 		fatal(status);
 }
 
-static void mutex_operation_wait(pthread_mutex_t *mutex)
-{
-	int status;
-
-	/*
-	 * Unlock a mutex, but wait for a pending operation
-	 * if one is in progress
-	 */
-	status = pthread_mutex_trylock(mutex);
-	if (status) {
-		if (status == EBUSY) {
-			/* Mutex locked - do we own it */
-			status = pthread_mutex_unlock(mutex);
-			if (status) {
-				if (status != EPERM)
-					fatal(status);
-			} else
-				return;
-
-			status = pthread_mutex_lock(mutex);
-			if (status)
-				fatal(status);
-		} else
-			fatal(status);
-
-		/* Operation complete, release it */
-		status = pthread_mutex_unlock(mutex);
-		if (status)
-			fatal(status);
-	} else {
-		status = pthread_mutex_unlock(mutex);
-		if (status)
-			fatal(status);
-	}
-
-	return;
-}
-
 int handle_mounts_startup_cond_init(struct startup_cond *suc)
 {
 	int status;
@@ -1526,22 +1484,25 @@ static void handle_mounts_cleanup(void *
 	if (!submount && strcmp(ap->path, "/-") && ap->dir_created)
 		clean = 1;
 
-	/* If we have been canceled then we may hold the state mutex. */
-	mutex_operation_wait(&ap->state_mutex);
+	if (submount) {
+		/* We are finishing up */
+		ap->parent->submnt_count--;
+		list_del_init(&ap->mounts);
+	}
 
-	alarm_delete(ap);
-	st_remove_tasks(ap);
+	master_remove_mapent(ap->entry);
+	master_source_unlock(ap->entry);
 
-	umount_autofs(ap, 1);
+	if (submount) {
+		mounts_mutex_unlock(ap->parent);
+		master_source_unlock(ap->parent->entry);
+	}
+	master_mutex_unlock();
 
 	destroy_logpri_fifo(ap);
-	master_signal_submount(ap, MASTER_SUBMNT_JOIN);
-	master_remove_mapent(ap->entry);
 	master_free_mapent_sources(ap->entry, 1);
 	master_free_mapent(ap->entry);
 
-	sched_yield();
-
 	if (clean) {
 		if (rmdir(path) == -1) {
 			char *estr = strerror_r(errno, buf, MAX_ERR_BUF);
@@ -1572,8 +1533,6 @@ void *handle_mounts(void *arg)
 	pthread_cleanup_push(return_start_status, suc);
 	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
 
-	state_mutex_lock(ap);
-
 	status = pthread_mutex_lock(&suc->mutex);
 	if (status) {
 		logerr("failed to lock startup condition mutex!");
@@ -1583,7 +1542,6 @@ void *handle_mounts(void *arg)
 	if (mount_autofs(ap) < 0) {
 		crit(ap->logopt, "mount of %s failed!", ap->path);
 		suc->status = 1;
-		state_mutex_unlock(ap);
 		umount_autofs(ap, 1);
 		pthread_setcancelstate(cancel_state, NULL);
 		pthread_exit(NULL);
@@ -1600,56 +1558,70 @@ void *handle_mounts(void *arg)
 	if (!ap->submount && ap->exp_timeout)
 		alarm_add(ap, ap->exp_runfreq + rand() % ap->exp_runfreq);
 
-	pthread_cleanup_push(handle_mounts_cleanup, ap);
 	pthread_setcancelstate(cancel_state, NULL);
 
-	state_mutex_unlock(ap);
-
 	while (ap->state != ST_SHUTDOWN) {
 		if (handle_packet(ap)) {
-			int ret, result;
+			int ret, cur_state;
+
+			/*
+			 * If we're a submount we need to ensure our parent
+			 * doesn't try to mount us again until our shutdown
+			 * is complete and that any outstanding mounts are
+			 * completed before we try to shutdown.
+			 */
+			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cur_state);
+
+			master_mutex_lock();
+
+			if (ap->submount) {
+				master_source_writelock(ap->parent->entry);
+				mounts_mutex_lock(ap->parent);
+			}
+
+			master_source_writelock(ap->entry);
+
+			if (ap->state != ST_SHUTDOWN) {
+				if (!ap->submount)
+					alarm_add(ap, ap->exp_runfreq);
+				/* Return to ST_READY is done immediately */
+				st_add_task(ap, ST_READY);
+				master_source_unlock(ap->entry);
+				if (ap->submount) {
+					mounts_mutex_unlock(ap->parent);
+					master_source_unlock(ap->parent->entry);
+				}
+
+				master_mutex_unlock();
+
+				pthread_setcancelstate(cur_state, NULL);
+				continue;
+			}
+
+			alarm_delete(ap);
+			st_remove_tasks(ap);
+			st_wait_task(ap, ST_ANY, 0);
 
-			state_mutex_lock(ap);
 			/*
 			 * For a direct mount map all mounts have already gone
-			 * by the time we get here.
+			 * by the time we get here and since we only ever
+			 * umount direct mounts at shutdown there is no need
+			 * to check for possible recovery.
 			 */
 			if (ap->type == LKP_DIRECT) {
-				status = 1;
-				state_mutex_unlock(ap);
+				umount_autofs(ap, 1);
 				break;
 			}
 
 			/*
-			 * If the ioctl fails assume the kernel doesn't have
-			 * AUTOFS_IOC_ASKUMOUNT and just continue.
+			 * If umount_autofs returns non-zero it wasn't able
+			 * to complete the umount and has left the mount intact
+			 * so we can continue. This can happen if a lookup
+			 * occurs while we're trying to umount.
 			 */
-			ret = ioctl(ap->ioctlfd, AUTOFS_IOC_ASKUMOUNT, &result);
-			if (ret == -1) {
-				state_mutex_unlock(ap);
+			ret = umount_autofs(ap, 1);
+			if (!ret)
 				break;
-			}
-
-			/* OK to exit */
-			if (ap->state == ST_SHUTDOWN) {
-				if (result) {
-					state_mutex_unlock(ap);
-					break;
-				}
-#ifdef ENABLE_IGNORE_BUSY_MOUNTS
-				/*
-				 * There weren't any active mounts but if the
-				 * filesystem is busy there may be a mount
-				 * request in progress so return to the ready
-				 * state unless a shutdown has been explicitly
-				 * requested.
-				 */
-				if (ap->shutdown) {
-					state_mutex_unlock(ap);
-					break;
-				}
-#endif
-			}
 
 			/* Failed shutdown returns to ready */
 			warn(ap->logopt,
@@ -1657,14 +1629,22 @@ void *handle_mounts(void *arg)
 			     ap->path);
 			if (!ap->submount)
 				alarm_add(ap, ap->exp_runfreq);
-			nextstate(ap->state_pipe[1], ST_READY);
+			/* Return to ST_READY is done immediately */
+			st_add_task(ap, ST_READY);
+			master_source_unlock(ap->entry);
+			if (ap->submount) {
+				mounts_mutex_unlock(ap->parent);
+				master_source_unlock(ap->parent->entry);
+			}
+
+			master_mutex_unlock();
+
+			pthread_setcancelstate(cur_state, NULL);
 
-			state_mutex_unlock(ap);
 		}
 	}
 
-	pthread_cleanup_pop(1);
-	sched_yield();
+	handle_mounts_cleanup(ap);
 
 	return NULL;
 }
--- autofs-5.0.3.orig/daemon/direct.c
+++ autofs-5.0.3/daemon/direct.c
@@ -216,8 +216,6 @@ int umount_autofs_direct(struct autofs_p
 
 	mnts = tree_make_mnt_tree(_PROC_MOUNTS, "/");
 	pthread_cleanup_push(mnts_cleanup, mnts);
-	pthread_cleanup_push(master_source_lock_cleanup, ap->entry);
-	master_source_readlock(ap->entry);
 	nc = ap->entry->master->nc;
 	cache_readlock(nc);
 	pthread_cleanup_push(cache_lock_cleanup, nc);
@@ -244,7 +242,6 @@ int umount_autofs_direct(struct autofs_p
 	}
 	pthread_cleanup_pop(1);
 	pthread_cleanup_pop(1);
-	pthread_cleanup_pop(1);
 
 	return 0;
 }
@@ -572,9 +569,10 @@ int umount_autofs_offset(struct autofs_p
 			return 1;
 		} else if (!status) {
 			if (ap->state != ST_SHUTDOWN_FORCE) {
-				error(ap->logopt,
-				      "ask umount returned busy for %s",
-				      me->key);
+				if (ap->shutdown)
+					error(ap->logopt,
+					     "ask umount returned busy for %s",
+					     me->key);
 				return 1;
 			} else {
 				me->ioctlfd = -1;
@@ -904,7 +902,10 @@ void *expire_proc_direct(void *arg)
 		 * All direct mounts must be present in the map
 		 * entry cache.
 		 */
+		pthread_cleanup_push(master_source_lock_cleanup, ap->entry);
+		master_source_readlock(ap->entry);
 		me = lookup_source_mapent(ap, next->path, LKP_DISTINCT);
+		pthread_cleanup_pop(1);
 		if (!me)
 			continue;
 
@@ -1110,6 +1111,8 @@ int handle_packet_expire_direct(struct a
 	struct pending_args *mt;
 	char buf[MAX_ERR_BUF];
 	pthread_t thid;
+	struct timespec wait;
+	struct timeval now;
 	int status, state;
 
 	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state);
@@ -1124,7 +1127,7 @@ int handle_packet_expire_direct(struct a
 	 * and since it got mounted we have to trust that
 	 * there is an entry in the cache.
 	 */
-	master_source_readlock(ap->entry);
+	master_source_writelock(ap->entry);
 	map = ap->entry->maps;
 	while (map) {
 		mc = map->mc;
@@ -1135,7 +1138,6 @@ int handle_packet_expire_direct(struct a
 		cache_unlock(mc);
 		map = map->next;
 	}
-	master_source_unlock(ap->entry);
 
 	if (!me) {
 		/*
@@ -1144,10 +1146,28 @@ int handle_packet_expire_direct(struct a
 		 */
 		crit(ap->logopt, "can't find map entry for (%lu,%lu)",
 		    (unsigned long) pkt->dev, (unsigned long) pkt->ino);
+		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		pthread_setcancelstate(state, NULL);
 		return 1;
 	}
 
+	/* Can't expire it if it isn't mounted */
+	if (me->ioctlfd == -1) {
+		int ioctlfd = open(me->key, O_RDONLY);
+		if (ioctlfd == -1) {
+			crit(ap->logopt, "can't open ioctlfd for %s",
+			     me->key);
+			pthread_setcancelstate(state, NULL);
+			return 1;
+		}
+		send_ready(ap->logopt, ioctlfd, pkt->wait_queue_token);
+		close(ioctlfd);
+		cache_unlock(mc);
+		master_source_unlock(ap->entry);
+		pthread_setcancelstate(state, NULL);
+		return 0;
+	}
 
 	mt = malloc(sizeof(struct pending_args));
 	if (!mt) {
@@ -1155,6 +1175,7 @@ int handle_packet_expire_direct(struct a
 		error(ap->logopt, "malloc: %s", estr);
 		send_fail(ap->logopt, me->ioctlfd, pkt->wait_queue_token);
 		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		pthread_setcancelstate(state, NULL);
 		return 1;
 	}
@@ -1184,6 +1205,7 @@ int handle_packet_expire_direct(struct a
 		error(ap->logopt, "expire thread create failed");
 		send_fail(ap->logopt, mt->ioctlfd, pkt->wait_queue_token);
 		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		expire_mutex_unlock(NULL);
 		pending_cond_destroy(mt);
 		free_pending_args(mt);
@@ -1192,14 +1214,18 @@ int handle_packet_expire_direct(struct a
 	}
 
 	cache_unlock(mc);
+	master_source_unlock(ap->entry);
 
 	pthread_cleanup_push(expire_mutex_unlock, NULL);
 	pthread_setcancelstate(state, NULL);
 
 	mt->signaled = 0;
 	while (!mt->signaled) {
+		gettimeofday(&now, NULL);
+		wait.tv_sec = now.tv_sec + 2;
+		wait.tv_nsec = now.tv_usec * 1000;
 		status = pthread_cond_wait(&mt->cond, &ea_mutex);
-		if (status)
+		if (status && status != ETIMEDOUT)
 			fatal(status);
 	}
 
@@ -1263,6 +1289,9 @@ static void *do_mount_direct(void *arg)
 	if (status == -1) {
 		error(ap->logopt,
 		      "can't stat direct mount trigger %s", mt.name);
+		send_fail(ap->logopt,
+			  mt.ioctlfd, mt.wait_queue_token);
+		close(mt.ioctlfd);
 		pthread_setcancelstate(state, NULL);
 		pthread_exit(NULL);
 	}
@@ -1272,6 +1301,8 @@ static void *do_mount_direct(void *arg)
 		error(ap->logopt,
 		     "direct trigger not valid or already mounted %s",
 		     mt.name);
+		send_ready(ap->logopt, mt.ioctlfd, mt.wait_queue_token);
+		close(mt.ioctlfd);
 		pthread_setcancelstate(state, NULL);
 		pthread_exit(NULL);
 	}
@@ -1290,19 +1321,12 @@ static void *do_mount_direct(void *arg)
 	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state);
 	if (status) {
 		struct mapent *me;
-		int real_mount, set_fd;
-		cache_readlock(mt.mc);
+		cache_writelock(mt.mc);
 		me = cache_lookup_distinct(mt.mc, mt.name);
-		real_mount = is_mounted(_PATH_MOUNTED, me->key, MNTS_REAL);
-		set_fd = (real_mount || me->multi == me);
-		cache_unlock(mt.mc);
-		if (set_fd) {
+		if (me)
 			me->ioctlfd = mt.ioctlfd;
-			send_ready(ap->logopt, mt.ioctlfd, mt.wait_queue_token);
-		} else {
-			send_ready(ap->logopt, mt.ioctlfd, mt.wait_queue_token);
-			close(mt.ioctlfd);
-		}
+		send_ready(ap->logopt, mt.ioctlfd, mt.wait_queue_token);
+		cache_unlock(mt.mc);
 		info(ap->logopt, "mounted %s", mt.name);
 	} else {
 		send_fail(ap->logopt, mt.ioctlfd, mt.wait_queue_token);
@@ -1325,11 +1349,21 @@ int handle_packet_missing_direct(struct 
 	struct pending_args *mt;
 	char buf[MAX_ERR_BUF];
 	int status = 0;
+	struct timespec wait;
+	struct timeval now;
 	int ioctlfd, len, cl_flags, state;
 
 	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state);
 
-	master_source_readlock(ap->entry);
+	/*
+	 * If our parent is a direct or offset mount that has been
+	 * covered by a mount and another lookup occurs after the
+	 * mount but before the device and inode are set in the
+	 * cache entry we will not be able to find the mapent. So
+	 * we must take the source writelock to ensure the parent
+	 * has mount is complete before we look for the entry.
+	 */
+	master_source_writelock(ap->entry);
 	map = ap->entry->maps;
 	while (map) {
 		/*
@@ -1349,7 +1383,6 @@ int handle_packet_missing_direct(struct 
 		cache_unlock(mc);
 		map = map->next;
 	}
-	master_source_unlock(ap->entry);
 
 	if (!me) {
 		/*
@@ -1358,6 +1391,8 @@ int handle_packet_missing_direct(struct 
 		 */
 		logerr("can't find map entry for (%lu,%lu)",
 		    (unsigned long) pkt->dev, (unsigned long) pkt->ino);
+		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		pthread_setcancelstate(state, NULL);
 		return 1;
 	}
@@ -1371,6 +1406,7 @@ int handle_packet_missing_direct(struct 
 
 	if (ioctlfd == -1) {
 		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		pthread_setcancelstate(state, NULL);
 		crit(ap->logopt, "failed to create ioctl fd for %s", me->key);
 		/* TODO:  how do we clear wait q in kernel ?? */
@@ -1386,12 +1422,11 @@ int handle_packet_missing_direct(struct 
 		  (unsigned long) pkt->wait_queue_token, me->key, pkt->pid);
 
 	/* Ignore packet if we're trying to shut down */
-	if (ap->shutdown ||
-	    ap->state == ST_SHUTDOWN_FORCE ||
-	    ap->state == ST_SHUTDOWN) {
+	if (ap->shutdown || ap->state == ST_SHUTDOWN_FORCE) {
 		send_fail(ap->logopt, ioctlfd, pkt->wait_queue_token);
 		close(ioctlfd);
 		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		pthread_setcancelstate(state, NULL);
 		return 1;
 	}
@@ -1402,6 +1437,7 @@ int handle_packet_missing_direct(struct 
 		send_fail(ap->logopt, ioctlfd, pkt->wait_queue_token);
 		close(ioctlfd);
 		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		pthread_setcancelstate(state, NULL);
 		return 1;
 	}
@@ -1413,6 +1449,7 @@ int handle_packet_missing_direct(struct 
 		send_fail(ap->logopt, ioctlfd, pkt->wait_queue_token);
 		close(ioctlfd);
 		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		pthread_setcancelstate(state, NULL);
 		return 1;
 	}
@@ -1447,6 +1484,7 @@ int handle_packet_missing_direct(struct 
 		send_fail(ap->logopt, ioctlfd, pkt->wait_queue_token);
 		close(ioctlfd);
 		cache_unlock(mc);
+		master_source_unlock(ap->entry);
 		mount_mutex_unlock(mt);
 		pending_cond_destroy(mt);
 		pending_mutex_destroy(mt);
@@ -1456,6 +1494,8 @@ int handle_packet_missing_direct(struct 
 	}
 
 	cache_unlock(mc);
+	master_source_unlock(ap->entry);
+
 	pthread_cleanup_push(free_pending_args, mt);
 	pthread_cleanup_push(pending_mutex_destroy, mt);
 	pthread_cleanup_push(pending_cond_destroy, mt);
@@ -1464,8 +1504,11 @@ int handle_packet_missing_direct(struct 
 
 	mt->signaled = 0;
 	while (!mt->signaled) {
-		status = pthread_cond_wait(&mt->cond, &mt->mutex);
-		if (status)
+		gettimeofday(&now, NULL);
+		wait.tv_sec = now.tv_sec + 2;
+		wait.tv_nsec = now.tv_usec * 1000;
+		status = pthread_cond_timedwait(&mt->cond, &mt->mutex, &wait);
+		if (status && status != ETIMEDOUT)
 			fatal(status);
 	}
 
--- autofs-5.0.3.orig/daemon/indirect.c
+++ autofs-5.0.3/daemon/indirect.c
@@ -230,11 +230,8 @@ int mount_autofs_indirect(struct autofs_
 	return 0;
 }
 
-int umount_autofs_indirect(struct autofs_point *ap)
+static void close_mount_fds(struct autofs_point *ap)
 {
-	char buf[MAX_ERR_BUF];
-	int ret, rv, retries;
-
 	/*
 	 * Since submounts look after themselves the parent never knows
 	 * it needs to close the ioctlfd for offset mounts so we have
@@ -244,6 +241,25 @@ int umount_autofs_indirect(struct autofs
 	if (ap->submount)
 		lookup_source_close_ioctlfd(ap->parent, ap->path);
 
+	close(ap->state_pipe[0]);
+	close(ap->state_pipe[1]);
+	ap->state_pipe[0] = -1;
+	ap->state_pipe[1] = -1;
+
+	if (ap->pipefd >= 0)
+		close(ap->pipefd);
+
+	if (ap->kpipefd >= 0)
+		close(ap->kpipefd);
+
+	return;
+}
+
+int umount_autofs_indirect(struct autofs_point *ap)
+{
+	char buf[MAX_ERR_BUF];
+	int ret, rv, retries;
+
 	/* If we are trying to shutdown make sure we can umount */
 	rv = ioctl(ap->ioctlfd, AUTOFS_IOC_ASKUMOUNT, &ret);
 	if (rv == -1) {
@@ -251,24 +267,20 @@ int umount_autofs_indirect(struct autofs
 		logerr("ioctl failed: %s", estr);
 		return 1;
 	} else if (!ret) {
+#if defined(ENABLE_IGNORE_BUSY_MOUNTS) || defined(ENABLE_FORCED_SHUTDOWN)
+		if (!ap->shutdown)
+			return 1;
 		error(ap->logopt, "ask umount returned busy %s", ap->path);
+#else
 		return 1;
+#endif
 	}
 
-	ioctl(ap->ioctlfd, AUTOFS_IOC_CATATONIC, 0);
+	if (ap->shutdown)
+		ioctl(ap->ioctlfd, AUTOFS_IOC_CATATONIC, 0);
+
 	close(ap->ioctlfd);
 	ap->ioctlfd = -1;
-	close(ap->state_pipe[0]);
-	close(ap->state_pipe[1]);
-	ap->state_pipe[0] = -1;
-	ap->state_pipe[1] = -1;
-
-	if (ap->pipefd >= 0)
-		close(ap->pipefd);
-
-	if (ap->kpipefd >= 0)
-		close(ap->kpipefd);
-
 	sched_yield();
 
 	retries = UMOUNT_RETRIES;
@@ -285,24 +297,61 @@ int umount_autofs_indirect(struct autofs
 		case EINVAL:
 			error(ap->logopt,
 			      "mount point %s does not exist", ap->path);
+			close_mount_fds(ap);
 			return 0;
 			break;
 		case EBUSY:
-			error(ap->logopt,
+			debug(ap->logopt,
 			      "mount point %s is in use", ap->path);
-			if (ap->state == ST_SHUTDOWN_FORCE)
+			if (ap->state == ST_SHUTDOWN_FORCE) {
+				close_mount_fds(ap);
 				goto force_umount;
-			else
-				return 0;
+			} else {
+				int cl_flags;
+				/*
+				 * If the umount returns EBUSY there may be
+				 * a mount request in progress so we need to
+				 * recover unless we have been explicitly
+				 * asked to shutdown and configure option
+				 * ENABLE_IGNORE_BUSY_MOUNTS is enabled.
+				 */
+#ifdef ENABLE_IGNORE_BUSY_MOUNTS
+				if (ap->shutdown) {
+					close_mount_fds(ap);
+					return 0;
+				}
+#endif
+				ap->ioctlfd = open(ap->path, O_RDONLY);
+				if (ap->ioctlfd < 0) {
+					warn(ap->logopt,
+					     "could not recover autofs path %s",
+					     ap->path);
+					close_mount_fds(ap);
+					return 0;
+				}
+
+				if ((cl_flags = fcntl(ap->ioctlfd, F_GETFD, 0)) != -1) {
+					cl_flags |= FD_CLOEXEC;
+					fcntl(ap->ioctlfd, F_SETFD, cl_flags);
+				}
+			}
 			break;
 		case ENOTDIR:
 			error(ap->logopt, "mount point is not a directory");
+			close_mount_fds(ap);
 			return 0;
 			break;
 		}
 		return 1;
 	}
 
+	/*
+	 * We have successfully umounted the mount so we now close
+	 * the descriptors. The kernel end of the kernel pipe will
+	 * have been put during the umount super block cleanup.
+	 */
+	close_mount_fds(ap);
+
 force_umount:
 	if (rv != 0) {
 		warn(ap->logopt,
@@ -439,9 +488,12 @@ void *expire_proc_indirect(void *arg)
 		 * Otherwise it's a top level indirect mount (possibly
 		 * with offsets in it) and we use the usual ioctlfd.
 		 */
+		pthread_cleanup_push(master_source_lock_cleanup, ap->entry);
+		master_source_readlock(ap->entry);
 		me = lookup_source_mapent(ap, next->path, LKP_DISTINCT);
 		if (!me && ind_key)
 			me = lookup_source_mapent(ap, ind_key, LKP_NORMAL);
+		pthread_cleanup_pop(1);
 		if (!me)
 			continue;
 
@@ -586,6 +638,8 @@ int handle_packet_expire_indirect(struct
 	struct pending_args *mt;
 	char buf[MAX_ERR_BUF];
 	pthread_t thid;
+	struct timespec wait;
+	struct timeval now;
 	int status, state;
 
 	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state);
@@ -632,8 +686,11 @@ int handle_packet_expire_indirect(struct
 
 	mt->signaled = 0;
 	while (!mt->signaled) {
-		status = pthread_cond_wait(&mt->cond, &ea_mutex);
-		if (status)
+		gettimeofday(&now, NULL);
+		wait.tv_sec = now.tv_sec + 2;
+		wait.tv_nsec = now.tv_usec * 1000;
+		status = pthread_cond_timedwait(&mt->cond, &ea_mutex, &wait);
+		if (status && status != ETIMEDOUT)
 			fatal(status);
 	}
 
@@ -735,6 +792,8 @@ int handle_packet_missing_indirect(struc
 	pthread_t thid;
 	char buf[MAX_ERR_BUF];
 	struct pending_args *mt;
+	struct timespec wait;
+	struct timeval now;
 	int status, state;
 
 	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state);
@@ -743,9 +802,7 @@ int handle_packet_missing_indirect(struc
 		(unsigned long) pkt->wait_queue_token, pkt->name, pkt->pid);
 
 	/* Ignore packet if we're trying to shut down */
-	if (ap->shutdown ||
-	    ap->state == ST_SHUTDOWN_FORCE ||
-	    ap->state == ST_SHUTDOWN) {
+	if (ap->shutdown || ap->state == ST_SHUTDOWN_FORCE) {
 		send_fail(ap->logopt, ap->ioctlfd, pkt->wait_queue_token);
 		pthread_setcancelstate(state, NULL);
 		return 0;
@@ -802,8 +859,11 @@ int handle_packet_missing_indirect(struc
 
 	mt->signaled = 0;
 	while (!mt->signaled) {
-		status = pthread_cond_wait(&mt->cond, &mt->mutex);
-		if (status)
+		gettimeofday(&now, NULL);
+		wait.tv_sec = now.tv_sec + 2;
+		wait.tv_nsec = now.tv_usec * 1000;
+		status = pthread_cond_timedwait(&mt->cond, &mt->mutex, &wait);
+		if (status && status != ETIMEDOUT)
 			fatal(status);
 	}
 
--- autofs-5.0.3.orig/daemon/lookup.c
+++ autofs-5.0.3/daemon/lookup.c
@@ -935,16 +935,10 @@ void lookup_close_lookup(struct autofs_p
 	if (!map)
 		return;
 
-	/*
-	 * Make sure we don't kill the context if a mount
-	 * request has come in while were shutting down.
-	 */
-	master_source_writelock(ap->entry);
 	while (map) {
 		lookup_close_lookup_instances(map);
 		map = map->next;
 	}
-	master_source_unlock(ap->entry);
 
 	return;
 }
@@ -1122,7 +1116,6 @@ struct mapent *lookup_source_mapent(stru
 	struct mapent_cache *mc;
 	struct mapent *me = NULL;
 
-	master_source_readlock(entry);
 	map = entry->maps;
 	while (map) {
 		mc = map->mc;
@@ -1136,7 +1129,6 @@ struct mapent *lookup_source_mapent(stru
 		cache_unlock(mc);
 		map = map->next;
 	}
-	master_source_unlock(entry);
 
 	return me;
 }
@@ -1149,8 +1141,6 @@ int lookup_source_close_ioctlfd(struct a
 	struct mapent *me;
 	int ret = 0;
 
-	pthread_cleanup_push(master_source_lock_cleanup, entry);
-	master_source_readlock(entry);
 	map = entry->maps;
 	while (map) {
 		mc = map->mc;
@@ -1168,7 +1158,6 @@ int lookup_source_close_ioctlfd(struct a
 		cache_unlock(mc);
 		map = map->next;
 	}
-	pthread_cleanup_pop(1);
 
 	return ret;
 }
--- autofs-5.0.3.orig/daemon/state.c
+++ autofs-5.0.3/daemon/state.c
@@ -37,19 +37,19 @@ static LIST_HEAD(state_queue);
 static void st_set_thid(struct autofs_point *, pthread_t);
 static void st_set_done(struct autofs_point *ap);
 
-#define st_mutex_lock() \
-do { \
-	int status = pthread_mutex_lock(&mutex); \
-	if (status) \
-		fatal(status); \
-} while (0)
-
-#define st_mutex_unlock() \
-do { \
-	int status = pthread_mutex_unlock(&mutex); \
-	if (status) \
-		fatal(status); \
-} while (0)
+void st_mutex_lock(void)
+{
+	int status = pthread_mutex_lock(&mutex);
+	if (status)
+		fatal(status);
+}
+
+void st_mutex_unlock(void)
+{
+	int status = pthread_mutex_unlock(&mutex);
+	if (status)
+		fatal(status);
+}
 
 int do_mount_autofs_direct(struct autofs_point *, struct mnt_list *, struct mapent *);
 
@@ -96,21 +96,19 @@ void expire_cleanup(void *arg)
 	pthread_t thid = pthread_self();
 	struct expire_args *ec;
 	struct autofs_point *ap;
-	int statefd, success;
+	int success;
 	enum states next = ST_INVAL;
 
 	ec = (struct expire_args *) arg;
 	ap = ec->ap;
 	success = ec->status;
 
-	state_mutex_lock(ap);
+	st_mutex_lock();
 
 	debug(ap->logopt,
 	      "got thid %lu path %s stat %d",
 	      (unsigned long) thid, ap->path, success);
 
-	statefd = ap->state_pipe[1];
-
 	/* Check to see if expire process finished */
 	if (thid == ap->exp_thread) {
 		int rv, idle;
@@ -199,11 +197,11 @@ void expire_cleanup(void *arg)
 	}
 
 	if (next != ST_INVAL)
-		nextstate(statefd, next);
+		__st_add_task(ap, next);
 
 	st_set_done(ap);
 
-	state_mutex_unlock(ap);
+	st_mutex_unlock();
 
 	return;
 }
@@ -216,9 +214,6 @@ static unsigned int st_ready(struct auto
 	ap->shutdown = 0;
 	ap->state = ST_READY;
 
-	if (ap->submount)
-		master_signal_submount(ap, MASTER_SUBMNT_CONTINUE);
-
 	return 1;
 }
 
@@ -333,18 +328,18 @@ static void do_readmap_cleanup(void *arg
 	ra = (struct readmap_args *) arg;
 
 	ap = ra->ap;
-	ap->readmap_thread = 0;
 
-	state_mutex_lock(ap);
+	st_mutex_lock();
 
-	nextstate(ap->state_pipe[1], ST_READY);
+	ap->readmap_thread = 0;
+	st_ready(ap);
 	st_set_done(ap);
 
-	state_mutex_unlock(ap);
-
 	if (!ap->submount)
 		alarm_add(ap, ap->exp_runfreq);
 
+	st_mutex_unlock();
+
 	free(ra);
 
 	return;
@@ -499,10 +494,8 @@ static unsigned int st_readmap(struct au
 	ra = malloc(sizeof(struct readmap_args));
 	if (!ra) {
 		error(ap->logopt, "failed to malloc reamap cond struct");
-		state_mutex_lock(ap);
-		nextstate(ap->state_pipe[1], ST_READY);
-		state_mutex_unlock(ap);
 		/* It didn't work: return to ready */
+		st_ready(ap);
 		if (!ap->submount)
 			alarm_add(ap, ap->exp_runfreq);
 		return 0;
@@ -528,10 +521,8 @@ static unsigned int st_readmap(struct au
 		error(ap->logopt, "read map thread create failed");
 		st_readmap_cleanup(ra);
 		free(ra);
-		state_mutex_lock(ap);
-		nextstate(ap->state_pipe[1], ST_READY);
-		state_mutex_unlock(ap);
 		/* It didn't work: return to ready */
+		st_ready(ap);
 		if (!ap->submount)
 			alarm_add(ap, ap->exp_runfreq);
 		return 0;
@@ -570,7 +561,7 @@ static unsigned int st_prepare_shutdown(
 		/* It didn't work: return to ready */
 		if (!ap->submount)
 			alarm_add(ap, ap->exp_runfreq);
-		nextstate(ap->state_pipe[1], ST_READY);
+		st_ready(ap);
 		return 0;
 
 	case EXP_STARTED:
@@ -596,7 +587,7 @@ static unsigned int st_force_shutdown(st
 		/* It didn't work: return to ready */
 		if (!ap->submount)
 			alarm_add(ap, ap->exp_runfreq);
-		nextstate(ap->state_pipe[1], ST_READY);
+		st_ready(ap);
 		return 0;
 
 	case EXP_STARTED:
@@ -605,6 +596,18 @@ static unsigned int st_force_shutdown(st
 	return 0;
 }
 
+static unsigned int st_shutdown(struct autofs_point *ap)
+{
+	debug(ap->logopt, "state %d path %s", ap->state, ap->path);
+
+	assert(ap->state == ST_SHUTDOWN_PENDING || ap->state == ST_SHUTDOWN_FORCE);
+
+	ap->state = ST_SHUTDOWN;
+	nextstate(ap->state_pipe[1], ST_SHUTDOWN);
+
+	return 0;
+}
+
 static unsigned int st_prune(struct autofs_point *ap)
 {
 	debug(ap->logopt, "state %d path %s", ap->state, ap->path);
@@ -617,7 +620,7 @@ static unsigned int st_prune(struct auto
 	case EXP_PARTIAL:
 		if (!ap->submount)
 			alarm_add(ap, ap->exp_runfreq);
-		nextstate(ap->state_pipe[1], ST_READY);
+		st_ready(ap);
 		return 0;
 
 	case EXP_STARTED:
@@ -638,7 +641,7 @@ static unsigned int st_expire(struct aut
 	case EXP_PARTIAL:
 		if (!ap->submount)
 			alarm_add(ap, ap->exp_runfreq);
-		nextstate(ap->state_pipe[1], ST_READY);
+		st_ready(ap);
 		return 0;
 
 	case EXP_STARTED:
@@ -665,43 +668,35 @@ static struct state_queue *st_alloc_task
 	return task;
 }
 
-/* Insert alarm entry on ordered list. */
-int st_add_task(struct autofs_point *ap, enum states state)
+/*
+ * Insert alarm entry on ordered list.
+ * State queue mutex and ap state mutex, in that order, must be held.
+ */
+int __st_add_task(struct autofs_point *ap, enum states state)
 {
 	struct list_head *head;
 	struct list_head *p, *q;
 	struct state_queue *new;
-	enum states ap_state;
 	unsigned int empty = 1;
 	int status;
 
 	/* Task termination marker, poke state machine */
 	if (state == ST_READY) {
-		state_mutex_lock(ap);
 		st_ready(ap);
-		state_mutex_unlock(ap);
-
-		st_mutex_lock();
 
 		signaled = 1;
 		status = pthread_cond_signal(&cond);
 		if (status)
 			fatal(status);
 
-		st_mutex_unlock();
-
 		return 1;
 	}
 
-	state_mutex_lock(ap);
-	ap_state = ap->state;
-	if (ap_state == ST_SHUTDOWN) {
-		state_mutex_unlock(ap);
+	if (ap->state == ST_SHUTDOWN)
 		return 1;
-	}
-	state_mutex_unlock(ap);
 
-	st_mutex_lock();
+	if (state == ST_SHUTDOWN)
+		return st_shutdown(ap);
 
 	head = &state_queue;
 
@@ -718,8 +713,8 @@ int st_add_task(struct autofs_point *ap,
 
 		/* Don't add duplicate tasks */
 		if ((task->state == state && !task->done) ||
-		   (ap_state == ST_SHUTDOWN_PENDING ||
-		    ap_state == ST_SHUTDOWN_FORCE))
+		   (ap->state == ST_SHUTDOWN_PENDING ||
+		    ap->state == ST_SHUTDOWN_FORCE))
 			break;
 
 		/* No pending tasks */
@@ -736,8 +731,8 @@ int st_add_task(struct autofs_point *ap,
 			p_task = list_entry(q, struct state_queue, pending);
 
 			if (p_task->state == state ||
-			   (ap_state == ST_SHUTDOWN_PENDING ||
-			    ap_state == ST_SHUTDOWN_FORCE))
+			   (ap->state == ST_SHUTDOWN_PENDING ||
+			    ap->state == ST_SHUTDOWN_FORCE))
 				goto done;
 		}
 
@@ -760,11 +755,24 @@ done:
 	if (status)
 		fatal(status);
 
+	return 1;
+}
+
+int st_add_task(struct autofs_point *ap, enum states state)
+{
+	int ret;
+
+	st_mutex_lock();
+	ret = __st_add_task(ap, state);
 	st_mutex_unlock();
 
-	return 1;
+	return ret;
 }
 
+/*
+ * Remove state queue tasks for ap.
+ * State queue mutex and ap state mutex, in that order, must be held.
+ */
 void st_remove_tasks(struct autofs_point *ap)
 {
 	struct list_head *head;
@@ -772,14 +780,10 @@ void st_remove_tasks(struct autofs_point
 	struct state_queue *task, *waiting;
 	int status;
 
-	st_mutex_lock();
-
 	head = &state_queue;
 
-	if (list_empty(head)) {
-		st_mutex_unlock();
+	if (list_empty(head))
 		return;
-	}
 
 	p = head->next;
 	while (p != head) {
@@ -816,12 +820,107 @@ void st_remove_tasks(struct autofs_point
 	if (status)
 		fatal(status);
 
+	return;
+}
+
+static int st_task_active(struct autofs_point *ap, enum states state)
+{
+	struct list_head *head;
+	struct list_head *p, *q;
+	struct state_queue *task, *waiting;
+	unsigned int active = 0;
+
+	st_mutex_lock();
+
+	head = &state_queue;
+
+	list_for_each(p, head) {
+		task = list_entry(p, struct state_queue, list);
+
+		if (task->ap != ap)
+			continue;
+
+		if (task->state == state) {
+			active = 1;
+			break;
+		}
+
+		if (state == ST_ANY) {
+			active = 1;
+			break;
+		}
+
+		list_for_each(q, &task->pending) {
+			waiting = list_entry(q, struct state_queue, pending);
+
+			if (waiting->state == state) {
+				active = 1;
+				break;
+			}
+
+			if (state == ST_ANY) {
+				active = 1;
+				break;
+			}
+		}
+	}
+
 	st_mutex_unlock();
 
-	return;
+	return active;
+}
+
+int st_wait_task(struct autofs_point *ap, enum states state, unsigned int seconds)
+{
+	unsigned int wait = 0;
+	unsigned int duration = 0;
+	int ret = 0;
 
+	while (1) {
+		struct timespec t = { 0, 200000000 };
+		struct timespec r;
+
+		while (nanosleep(&t, &r) == -1 && errno == EINTR)
+			memcpy(&t, &r, sizeof(struct timespec));
+
+		if (wait++ == 4) {
+			wait = 0;
+			duration++;
+		}
+
+		if (!st_task_active(ap, state)) {
+			ret = 1;
+			break;
+		}
+
+		if (seconds && duration >= seconds)
+			break;
+	}
+
+	return ret;
 }
 
+int st_wait_state(struct autofs_point *ap, enum states state)
+{
+	while (1) {
+		struct timespec t = { 0, 200000000 };
+		struct timespec r;
+
+		while (nanosleep(&t, &r) == -1 && errno == EINTR)
+			memcpy(&t, &r, sizeof(struct timespec));
+
+		st_mutex_lock();
+		if (ap->state == state) {
+			st_mutex_unlock();
+			return 1;
+		}
+		st_mutex_unlock();
+	}
+
+	return 0;
+}
+
+
 static int run_state_task(struct state_queue *task)
 {
 	struct autofs_point *ap;
@@ -831,8 +930,6 @@ static int run_state_task(struct state_q
 	ap = task->ap;
 	next_state = task->state;
 
-	state_mutex_lock(ap);
-
 	state = ap->state;
 
 	if (next_state != state) {
@@ -862,8 +959,6 @@ static int run_state_task(struct state_q
 		}
 	}
 
-	state_mutex_unlock(ap);
-
 	return ret;
 }
 
@@ -888,8 +983,6 @@ static void st_set_done(struct autofs_po
 	struct list_head *p, *head;
 	struct state_queue *task;
 
-	st_mutex_lock();
-
 	head = &state_queue;
 	list_for_each(p, head) {
 		task = list_entry(p, struct state_queue, list);
@@ -899,8 +992,6 @@ static void st_set_done(struct autofs_po
 		}
 	}
 
-	st_mutex_unlock();
-
 	return;
 }
 
--- autofs-5.0.3.orig/include/automount.h
+++ autofs-5.0.3/include/automount.h
@@ -399,7 +399,6 @@ struct autofs_point {
 	unsigned logopt;		/* Per map logging */
 	pthread_t exp_thread;		/* Thread that is expiring */
 	pthread_t readmap_thread;	/* Thread that is reading maps */
-	pthread_mutex_t state_mutex;	/* Protect state changes */
 	enum states state;		/* Current state */
 	int state_pipe[2];		/* State change router pipe */
 	unsigned dir_created;		/* Directory created for this mount? */
@@ -407,8 +406,6 @@ struct autofs_point {
 					 * host from which to mount */
 	struct autofs_point *parent;	/* Owner of mounts list for submount */
 	pthread_mutex_t mounts_mutex;	/* Protect mount lists */
-	pthread_cond_t mounts_cond;	/* Submounts condition variable */
-	unsigned int mounts_signaled;	/* Submount signals task complete */
 	struct list_head mounts;	/* List of autofs mounts at current level */
 	unsigned int submount;		/* Is this a submount */
 	unsigned int shutdown;		/* Shutdown notification */
@@ -446,20 +443,6 @@ int handle_packet_missing_direct(struct 
 void rm_unwanted(unsigned logopt, const char *path, int incl, dev_t dev);
 int count_mounts(unsigned logopt, const char *path, dev_t dev);
 
-#define state_mutex_lock(ap) \
-do { \
-	int _st_lock = pthread_mutex_lock(&ap->state_mutex); \
-	if (_st_lock) \
-		fatal(_st_lock); \
-} while(0)
-
-#define state_mutex_unlock(ap) \
-do{ \
-	int _st_unlock = pthread_mutex_unlock(&ap->state_mutex); \
-	if (_st_unlock) \
-		fatal(_st_unlock); \
-} while (0)
-
 #define mounts_mutex_lock(ap) \
 do { \
 	int _m_lock = pthread_mutex_lock(&ap->mounts_mutex); \
--- autofs-5.0.3.orig/include/master.h
+++ autofs-5.0.3/include/master.h
@@ -20,10 +20,6 @@
 #ifndef MASTER_H
 #define MASTER_H
 
-#define MASTER_SUBMNT_WAIT	0
-#define MASTER_SUBMNT_CONTINUE	1
-#define MASTER_SUBMNT_JOIN	2
-
 struct map_source {
 	char *type;
 	char *format;
@@ -104,7 +100,6 @@ struct master *master_new(const char *, 
 int master_read_master(struct master *, time_t, int);
 int master_submount_list_empty(struct autofs_point *ap);
 int master_notify_submount(struct autofs_point *, const char *path, enum states);
-void master_signal_submount(struct autofs_point *, unsigned int);
 void master_notify_state_change(struct master *, int);
 int master_mount_mounts(struct master *, time_t, int);
 extern inline unsigned int master_get_logopt(void);
--- autofs-5.0.3.orig/include/state.h
+++ autofs-5.0.3/include/state.h
@@ -38,7 +38,8 @@
  *
  */
 enum states {
-	ST_INVAL = -1,
+	ST_ANY = -2,
+	ST_INVAL,
 	ST_INIT,
 	ST_READY,
 	ST_EXPIRE,
@@ -81,12 +82,18 @@ struct readmap_args {
 	time_t now;              /* Time when map is read */
 };
 
+void st_mutex_lock(void);
+void st_mutex_unlock(void);
+
 void expire_cleanup(void *);
 void expire_proc_cleanup(void *);
 void nextstate(int, enum states);
 
 int st_add_task(struct autofs_point *, enum states);
+int __st_add_task(struct autofs_point *, enum states);
 void st_remove_tasks(struct autofs_point *);
+int st_wait_task(struct autofs_point *, enum states, unsigned int);
+int st_wait_state(struct autofs_point *ap, enum states state);
 int st_start_handler(void);
 
 #endif
--- autofs-5.0.3.orig/lib/alarm.c
+++ autofs-5.0.3/lib/alarm.c
@@ -178,7 +178,6 @@ static void *alarm_handler(void *arg)
 	head = &alarms;
 
 	while (1) {
-
 		if (list_empty(head)) {
 			/* No alarms, wait for one to be added */
 			status = pthread_cond_wait(&cond, &mutex);
@@ -211,19 +210,8 @@ static void *alarm_handler(void *arg)
 
 			if (!first->cancel) {
 				struct autofs_point *ap = first->ap;
-				/* 
-				 * We need to unlock the alarm list in case
-				 * some other thread holds the state_mutex
-				 *_lock(ap), and is currently trying to do
-				 * some alarm_* function (i.e if we don't 
-				 * unlock, we might deadlock).
-				 */
 				alarm_unlock(); 
-
-				state_mutex_lock(ap);
-				nextstate(ap->state_pipe[1], ST_EXPIRE);
-				state_mutex_unlock(ap);
-
+				st_add_task(ap, ST_EXPIRE);
 				alarm_lock();
 			}
 			free(first);
--- autofs-5.0.3.orig/lib/master.c
+++ autofs-5.0.3/lib/master.c
@@ -90,41 +90,20 @@ int master_add_autofs_point(struct maste
 	ap->logopt = logopt;
 
 	ap->parent = NULL;
+	ap->thid = 0;
 	ap->submnt_count = 0;
 	ap->submount = submount;
 	INIT_LIST_HEAD(&ap->mounts);
 	INIT_LIST_HEAD(&ap->submounts);
 	ap->shutdown = 0;
 
-	status = pthread_mutex_init(&ap->state_mutex, NULL);
-	if (status) {
-		free(ap->path);
-		free(ap);
-		return 0;
-	}
-
 	status = pthread_mutex_init(&ap->mounts_mutex, NULL);
 	if (status) {
-		status = pthread_mutex_destroy(&ap->state_mutex);
-		if (status)
-			fatal(status);
 		free(ap->path);
 		free(ap);
 		return 0;
 	}
 
-	status = pthread_cond_init(&ap->mounts_cond, NULL);
-	if (status) {
-		status = pthread_mutex_destroy(&ap->mounts_mutex);
-		if (status)
-			fatal(status);
-		status = pthread_mutex_destroy(&ap->state_mutex);
-		if (status)
-			fatal(status);
-		free(ap->path);
-		free(ap);
-		return 0;
-	}
 	entry->ap = ap;
 
 	return 1;
@@ -137,18 +116,10 @@ void master_free_autofs_point(struct aut
 	if (!ap)
 		return;
 
-	status = pthread_mutex_destroy(&ap->state_mutex);
-	if (status)
-		fatal(status);
-
 	status = pthread_mutex_destroy(&ap->mounts_mutex);
 	if (status)
 		fatal(status);
 
-	status = pthread_cond_destroy(&ap->mounts_cond);
-	if (status)
-		fatal(status);
-
 	free(ap->path);
 	free(ap);
 }
@@ -295,11 +266,9 @@ struct map_source *master_find_map_sourc
 {
 	struct map_source *source = NULL;
 
-	master_mutex_lock();
-
+	master_source_readlock(entry);
 	source = __master_find_map_source(entry, type, format, argc, argv);
-
-	master_mutex_unlock();
+	master_source_unlock(entry);
 
 	return source;
 }
@@ -519,13 +488,7 @@ void send_map_update_request(struct auto
 	if (!need_update)
 		return;
 
-	status = pthread_mutex_lock(&ap->state_mutex);
-	if (status)
-		fatal(status);
-	nextstate(ap->state_pipe[1], ST_READMAP);
-	status = pthread_mutex_unlock(&ap->state_mutex);
-	if (status)
-		fatal(status);
+	st_add_task(ap, ST_READMAP);
 
 	return;
 }
@@ -695,17 +658,13 @@ void master_remove_mapent(struct master_
 	if (entry->ap->submount)
 		return;
 
-	master_mutex_lock();
 	if (!list_empty(&entry->list))
 		list_del_init(&entry->list);
-	master_mutex_unlock();
 	return;
 }
 
 void master_free_mapent_sources(struct master_mapent *entry, unsigned int free_cache)
 {
-	master_source_writelock(entry);
-
 	if (entry->maps) {
 		struct map_source *m, *n;
 
@@ -718,8 +677,6 @@ void master_free_mapent_sources(struct m
 		entry->maps = NULL;
 	}
 
-	master_source_unlock(entry);
-
 	return;
 }
 
@@ -827,10 +784,9 @@ int master_submount_list_empty(struct au
 int master_notify_submount(struct autofs_point *ap, const char *path, enum states state)
 {
 	struct list_head *head, *p;
-	struct autofs_point *this;
-	pthread_t thid;
+	struct autofs_point *this = NULL;
 	size_t plen = strlen(path);
-	int status, ret = 1;
+	int ret = 1;
 
 	mounts_mutex_lock(ap);
 
@@ -869,64 +825,30 @@ int master_notify_submount(struct autofs
 
 		/* Now we have a submount to expire */
 
-		state_mutex_lock(this);
+		st_mutex_lock();
 
 		if (this->state == ST_SHUTDOWN) {
-			state_mutex_unlock(this);
+			this = NULL;
+			st_mutex_unlock();
 			break;
 		}
 
-		nextstate(this->state_pipe[1], state);
-
-		state_mutex_unlock(this);
-
-		thid = this->thid;
-		ap->mounts_signaled = MASTER_SUBMNT_WAIT;
-		while (ap->mounts_signaled == MASTER_SUBMNT_WAIT) {
-			status = pthread_cond_wait(&ap->mounts_cond, &ap->mounts_mutex);
-			if (status)
-				fatal(status);
-		}
-
-		if (ap->mounts_signaled == MASTER_SUBMNT_JOIN) {
-			status = pthread_join(thid, NULL);
-			if (status)
-				fatal(status);
-		} else
-			ret = 0;
+		this->shutdown = ap->shutdown;
 
-		break;
-	}
-
-	mounts_mutex_unlock(ap);
+		__st_add_task(this, state);
 
-	return ret;
-}
-
-void master_signal_submount(struct autofs_point *ap, unsigned int join)
-{
-	int status;
-
-	if (!ap->parent || !ap->submount)
-		return;
+		st_mutex_unlock();
+		mounts_mutex_unlock(ap);
 
-	mounts_mutex_lock(ap->parent);
+		st_wait_task(this, state, 0);
 
-	ap->parent->mounts_signaled = join;
+		return ret;
 
-	if (join == MASTER_SUBMNT_JOIN) {
-		/* We are finishing up */
-		ap->parent->submnt_count--;
-		list_del(&ap->mounts);
 	}
 
-	status = pthread_cond_signal(&ap->parent->mounts_cond);
-	if (status)
-		fatal(status);
-
-	mounts_mutex_unlock(ap->parent);
+	mounts_mutex_unlock(ap);
 
-	return;
+	return ret;
 }
 
 void master_notify_state_change(struct master *master, int sig)
@@ -934,7 +856,7 @@ void master_notify_state_change(struct m
 	struct master_mapent *entry;
 	struct autofs_point *ap;
 	struct list_head *p;
-	int state_pipe, cur_state;
+	int cur_state;
 	unsigned int logopt;
 
 	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cur_state);
@@ -948,13 +870,11 @@ void master_notify_state_change(struct m
 		ap = entry->ap;
 		logopt = ap->logopt;
 
-		state_mutex_lock(ap);
+		st_mutex_lock();
 
 		if (ap->state == ST_SHUTDOWN)
 			goto next;
 
-		state_pipe = ap->state_pipe[1];
-
 		switch (sig) {
 		case SIGTERM:
 		case SIGINT:
@@ -962,7 +882,7 @@ void master_notify_state_change(struct m
 			    ap->state != ST_SHUTDOWN_FORCE) {
 				next = ST_SHUTDOWN_PENDING;
 				ap->shutdown = 1;
-				nextstate(state_pipe, next);
+				__st_add_task(ap, next);
 			}
 			break;
 #ifdef ENABLE_FORCED_SHUTDOWN
@@ -970,14 +890,15 @@ void master_notify_state_change(struct m
 			if (ap->state != ST_SHUTDOWN_FORCE &&
 			    ap->state != ST_SHUTDOWN_PENDING) {
 				next = ST_SHUTDOWN_FORCE;
-				nextstate(state_pipe, next);
+				ap->shutdown = 1;
+				__st_add_task(ap, next);
 			}
 			break;
 #endif
 		case SIGUSR1:
 			assert(ap->state == ST_READY);
 			next = ST_PRUNE;
-			nextstate(state_pipe, next);
+			__st_add_task(ap, next);
 			break;
 		}
 next:
@@ -986,7 +907,7 @@ next:
 			      "sig %d switching %s from %d to %d",
 			      sig, ap->path, ap->state, next);
 
-		state_mutex_unlock(ap);
+		st_mutex_unlock();
 	}
 
 	master_mutex_unlock();
@@ -1024,7 +945,6 @@ static int master_do_mount(struct master
 		handle_mounts_startup_cond_destroy(&suc);
 		return 0;
 	}
-	entry->thid = thid;
 
 	while (!suc.done) {
 		status = pthread_cond_wait(&suc.cond, &suc.mutex);
@@ -1037,45 +957,18 @@ static int master_do_mount(struct master
 		handle_mounts_startup_cond_destroy(&suc);
 		return 0;
 	}
+	entry->thid = thid;
 
 	handle_mounts_startup_cond_destroy(&suc);
 
 	return 1;
 }
 
-static void shutdown_entry(struct master_mapent *entry)
-{
-	int state_pipe;
-	struct autofs_point *ap;
-	struct stat st;
-	int ret;
-
-	ap = entry->ap;
-
-	debug(ap->logopt, "%s", entry->path);
-
-	state_mutex_lock(ap);
-
-	state_pipe = ap->state_pipe[1];
-
-	ret = fstat(state_pipe, &st);
-	if (ret == -1)
-		goto next;
-
-	nextstate(state_pipe, ST_SHUTDOWN_PENDING);
-next:
-	state_mutex_unlock(ap);
-
-	return;
-}
-
 static void check_update_map_sources(struct master_mapent *entry, int readall)
 {
 	struct map_source *source, *last;
-	int state_pipe, map_stale = 0;
 	struct autofs_point *ap;
-	struct stat st;
-	int ret;
+	int map_stale = 0;
 
 	if (readall)
 		map_stale = 1;
@@ -1128,17 +1021,8 @@ static void check_update_map_sources(str
 	master_source_unlock(entry);
 
 	/* The map sources have changed */
-	if (map_stale) {
-		state_mutex_lock(ap);
-
-		state_pipe = entry->ap->state_pipe[1];
-
-		ret = fstat(state_pipe, &st);
-		if (ret != -1)
-			nextstate(state_pipe, ST_READMAP);
-
-		state_mutex_unlock(ap);
-	}
+	if (map_stale)
+		st_add_task(ap, ST_READMAP);
 
 	return;
 }
@@ -1169,17 +1053,19 @@ int master_mount_mounts(struct master *m
 
 		/* A master map entry has gone away */
 		if (this->age < age) {
-			shutdown_entry(this);
+			st_add_task(ap, ST_SHUTDOWN_PENDING);
 			continue;
 		}
 
+		master_source_writelock(ap->entry);
 		lookup_close_lookup(ap);
+		master_source_unlock(ap->entry);
 
 		cache_readlock(nc);
 		ne = cache_lookup_distinct(nc, this->path);
 		if (ne && this->age > ne->age) {
 			cache_unlock(nc);
-			shutdown_entry(this);
+			st_add_task(ap, ST_SHUTDOWN_PENDING);
 			continue;
 		}
 		nested = cache_partial_match(nc, this->path);
@@ -1195,7 +1081,7 @@ int master_mount_mounts(struct master *m
 
 		check_update_map_sources(this, readall);
 
-		state_mutex_lock(ap);
+		st_mutex_lock();
 
 		state_pipe = this->ap->state_pipe[1];
 
@@ -1203,7 +1089,7 @@ int master_mount_mounts(struct master *m
 		ret = fstat(state_pipe, &st);
 		save_errno = errno;
 
-		state_mutex_unlock(ap);
+		st_mutex_unlock();
 
 		if (ret == -1 && save_errno == EBADF)
 			if (!master_do_mount(this)) {
--- autofs-5.0.3.orig/modules/mount_autofs.c
+++ autofs-5.0.3/modules/mount_autofs.c
@@ -242,7 +242,6 @@ int mount_mount(struct autofs_point *ap,
 		master_free_mapent(entry);
 		return 1;
 	}
-	nap->thid = thid;
 
 	while (!suc.done) {
 		status = pthread_cond_wait(&suc.cond, &suc.mutex);
@@ -264,6 +263,7 @@ int mount_mount(struct autofs_point *ap,
 		master_free_mapent(entry);
 		return 1;
 	}
+	nap->thid = thid;
 
 	ap->submnt_count++;
 	list_add(&nap->mounts, &ap->submounts);