--- //depot/projects/smpng/sys/fs/udf/udf_vfsops.c	2008/11/03 21:11:59
+++ //depot/user/jhb/lock/fs/udf/udf_vfsops.c	2008/11/20 20:48:51
@@ -342,6 +343,7 @@
 	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
 	MNT_ILOCK(mp);
 	mp->mnt_flag |= MNT_LOCAL;
+	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED;
 	MNT_IUNLOCK(mp);
 	udfmp->im_mountp = mp;
 	udfmp->im_dev = devvp->v_rdev;
@@ -538,22 +540,13 @@
 udf_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
 {
 	struct udf_mnt *udfmp;
-	struct vnode *vp;
 	ino_t id;
-	int error;
 
 	udfmp = VFSTOUDFFS(mp);
 
 	id = udf_getid(&udfmp->root_icb);
 
-	error = udf_vget(mp, id, LK_EXCLUSIVE, vpp);
-	if (error)
-		return error;
-
-	vp = *vpp;
-	vp->v_vflag |= VV_ROOT;
-
-	return (0);
+	return (udf_vget(mp, id, flags, vpp));
 }
 
 static int
@@ -589,6 +582,22 @@
 	if (error || *vpp != NULL)
 		return (error);
 
+	/*
+	 * We must promote to an exclusive lock for vnode creation. This
+	 * can happen if lookup is passed LOCKSHARED.
+	 */
+	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
+		flags &= ~LK_TYPE_MASK;
+		flags |= LK_EXCLUSIVE;
+	}
+
+	/*
+	 * We do not lock vnode creation as it is believed to be too
+	 * expensive for such rare case as simultaneous creation of vnode
+	 * for same ino by different processes. We just allow them to race
+	 * and check later to decide who wins. Let the race begin!
+	 */
+
 	td = curthread;
 	udfmp = VFSTOUDFFS(mp);
 
@@ -600,6 +609,7 @@
 		return (error);
 	}
 
+	VN_LOCK_AREC(vp);
 	unode->i_vnode = vp;
 	unode->hash_id = ino;
 	unode->udfmp = udfmp;
@@ -680,6 +690,13 @@
 		vp->v_type = VLNK;
 		break;
 	}
+
+	if (vp->v_type != VFIFO)
+		VN_LOCK_ASHARE(vp);
+
+	if (ino == udf_getid(&udfmp->root_icb))
+		vp->v_vflag |= VV_ROOT;
+
 	*vpp = vp;
 
 	return (0);
--- //depot/projects/smpng/sys/fs/udf/udf_vnops.c	2008/11/03 21:11:59
+++ //depot/user/jhb/lock/fs/udf/udf_vnops.c	2008/11/21 19:19:32
@@ -899,13 +899,14 @@
 	long namelen;
 	ino_t id = 0;
 	int offset, error = 0;
-	int numdirpasses, fsize;
+	int fsize, lkflags, ltype, numdirpasses;
 
 	dvp = a->a_dvp;
 	node = VTON(dvp);
 	udfmp = node->udfmp;
 	nameiop = a->a_cnp->cn_nameiop;
 	flags = a->a_cnp->cn_flags;
+	lkflags = a->a_cnp->cn_lkflags;
 	nameptr = a->a_cnp->cn_nameptr;
 	namelen = a->a_cnp->cn_namelen;
 	fsize = le64toh(node->fentry->inf_len);
@@ -967,20 +968,38 @@
 
 	/* Did we have a match? */
 	if (id) {
-		if (flags & ISDOTDOT)
+		/*
+		 * Remember where this entry was if it's the final
+		 * component.
+		 */
+		if ((flags & ISLASTCN) && nameiop == LOOKUP)
+			node->diroff = ds->offset + ds->off;
+		if (numdirpasses == 2)
+			nchstats.ncs_pass2++;
+		udf_closedir(ds);
+
+		if (flags & ISDOTDOT) {
+			ltype = VOP_ISLOCKED(dvp);
 			VOP_UNLOCK(dvp, 0);
-		error = udf_vget(udfmp->im_mountp, id, LK_EXCLUSIVE, &tdp);
-		if (flags & ISDOTDOT)
-			vn_lock(dvp, LK_EXCLUSIVE|LK_RETRY);
-		if (!error) {
+			error = udf_vget(udfmp->im_mountp, id, lkflags, &tdp);
+			vn_lock(dvp, ltype | LK_RETRY);
+		} else if (node->hash_id == id) {
+			VREF(dvp);	/* we want ourself, ie "." */
 			/*
-			 * Remember where this entry was if it's the final
-			 * component.
+			 * When we lookup "." we still can be asked to lock it
+			 * differently.
 			 */
-			if ((flags & ISLASTCN) && nameiop == LOOKUP)
-				node->diroff = ds->offset + ds->off;
-			if (numdirpasses == 2)
-				nchstats.ncs_pass2++;
+			ltype = lkflags & LK_TYPE_MASK;
+			if (ltype != VOP_ISLOCKED(dvp)) {
+				if (ltype == LK_EXCLUSIVE)
+					vn_lock(dvp, LK_UPGRADE | LK_RETRY);
+				else /* if (ltype == LK_SHARED) */
+					vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
+			}
+			tdp = dvp;
+		} else
+			error = udf_vget(udfmp->im_mountp, id, lkflags, &tdp);
+		if (!error) {
 			*vpp = tdp;
 			/* Put this entry in the cache */
 			if (flags & MAKEENTRY)
@@ -994,6 +1013,7 @@
 			udf_closedir(ds);
 			goto lookloop;
 		}
+		udf_closedir(ds);
 
 		/* Enter name into cache as non-existant */
 		if (flags & MAKEENTRY)
@@ -1007,7 +1027,6 @@
 		}
 	}
 
-	udf_closedir(ds);
 	return (error);
 }
 
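The udf_vget() hunk above follows the usual pattern for a filesystem that sets MNTK_LOOKUP_SHARED: a vnode already in the hash comes back with whatever lock type the caller asked for, but a vnode that still has to be created is always locked exclusively, so an LK_SHARED request is promoted first. Below is a minimal sketch of that pattern, separate from the patch; the function name example_vget is illustrative and the allocation step is elided.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>

/*
 * Sketch only, not UDF code: shared-to-exclusive promotion before
 * vnode creation, as done in the udf_vget() change above.
 */
static int
example_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	int error;

	/* A cached vnode is returned locked with the caller's lock type. */
	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/* Creating a new vnode needs an exclusive lock; promote a shared request. */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/* ... allocate the new vnode, lock it, and insert it into the hash ... */

	return (0);
}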