--- linux-2.4.22/fs/nfs/write.c.fix4.notfinished	Tue Oct  7 08:15:56 2003
+++ linux-2.4.22/fs/nfs/write.c	Tue Oct  7 09:14:16 2003
@@ -248,6 +248,12 @@
 		err = nfs_writepage_async(NULL, inode, page, 0, offset);
 		if (err >= 0)
 			err = 0;
+		else if (err == -EDEADLK) {
+			printk(KERN_WARNING "NFS DEBUG: EDEADLK returned, doing nfs_writepage_sync\n");
+			err = nfs_writepage_sync(NULL, inode, page, 0, offset);
+			if (err == offset)
+				err = 0;
+		}
 	} else {
 		err = nfs_writepage_sync(NULL, inode, page, 0, offset);
 		if (err == offset)
@@ -292,19 +298,22 @@
  *	 & co. for the 'write append' case. For 2.5 we may want to consider
  *	 some form of hashing so as to perform well on random writes.
  */
-static inline void
+static inline int
 nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 {
 	struct list_head *pos, *head;
 	unsigned long pg_idx = page_index(req->wb_page);
 
 	if (!list_empty(&req->wb_hash))
-		return;
+		return 0;
 	if (!NFS_WBACK_BUSY(req))
 		printk(KERN_ERR "NFS: unlocked request attempted hashed!\n");
 	head = &inode->u.nfs_i.writeback;
 	if (list_empty(head))
-		igrab(inode);
+		if (!igrab(inode)) {
+			printk(KERN_WARNING "NFS DEBUG: igrab failed! Aborting async writepage.\n");
+			return -EDEADLK;
+		}
 	list_for_each_prev(pos, head) {
 		struct nfs_page *entry = nfs_inode_wb_entry(pos);
 		if (page_index(entry->wb_page) < pg_idx)
@@ -313,6 +322,7 @@
 	inode->u.nfs_i.npages++;
 	list_add(&req->wb_hash, pos);
 	req->wb_count++;
+	return 0;
 }
 
 /*
@@ -659,9 +669,15 @@
 	}
 
 	if (new) {
+		int error;
 		nfs_lock_request_dontget(new);
-		nfs_inode_add_request(inode, new);
+		error = nfs_inode_add_request(inode, new);
 		spin_unlock(&nfs_wreq_lock);
+		if (error < 0) {
+			nfs_unlock_request(new);
+			nfs_release_request(new);
+			return ERR_PTR(error);
+		}
 		nfs_mark_request_dirty(new);
 		return new;
 	}
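
For reference, the fallback the first hunk adds to nfs_writepage() can be illustrated outside the kernel roughly as below. This is only a minimal userspace sketch of the error-handling pattern (try the asynchronous path; if it reports -EDEADLK because the inode could not be pinned, redo the write synchronously). try_async_write() and do_sync_write() are made-up stand-ins for nfs_writepage_async() and nfs_writepage_sync(), not real kernel interfaces.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for nfs_writepage_async(): pretend the
 * async path could not take its inode reference. */
static int try_async_write(int offset)
{
	(void)offset;
	return -EDEADLK;
}

/* Hypothetical stand-in for nfs_writepage_sync(): a successful
 * sync write returns the number of bytes written. */
static int do_sync_write(int offset)
{
	return offset;
}

int main(void)
{
	int offset = 4096;
	int err;

	err = try_async_write(offset);
	if (err >= 0)
		err = 0;
	else if (err == -EDEADLK) {
		/* Same fallback the patch adds: retry synchronously. */
		fprintf(stderr, "async path refused, falling back to sync\n");
		err = do_sync_write(offset);
		if (err == offset)
			err = 0;
	}

	printf("final status: %d\n", err);
	return err ? 1 : 0;
}

The idea behind the pattern, as I read the patch, is that the synchronous path does not need to keep the inode pinned across a deferred completion, so it is presumably a safe fallback whenever igrab() in nfs_inode_add_request() cannot take that extra reference.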