kernel24-2.4.34-1.src.rpm

diff -u -r -N ../../linus/2.4/linux/Documentation/Configure.help linux/Documentation/Configure.help
--- ../../linus/2.4/linux/Documentation/Configure.help	Tue Aug  6 21:13:55 2002
+++ linux/Documentation/Configure.help	Tue Aug  6 21:21:50 2002
@@ -694,6 +694,27 @@
   say M here and read <file:Documentation/modules.txt>.  The module
   will be called ide-cd.o.
 
+
+Packet writing on CD/DVD media (EXPERIMENTAL)
+CONFIG_CDROM_PKTCDVD
+  If you have a CDROM drive that supports packet writing, say Y to
+  include preliminary support. It should work with any MMC/Mt Fuji
+  compliant ATAPI or SCSI drive, which is just about any newer CD
+  writer.
+
+  Currently only writing to CD-RW discs is possible.
+
+  If you want to compile the driver as a module ( = code which can be
+  inserted in and removed from the running kernel whenever you want),
+  say M here and read <file:Documentation/modules.txt>. The module will be
+  called pktcdvd.o.
+
+Write caching
+CONFIG_CDROM_PKTCDVD_WCACHE
+  If enabled, write caching will be set for the CD-R/W device. For now
+  this option is dangerous unless the CD-RW media is known good, as we
+  don't do deferred write error handling yet.
+
 Include IDE/ATAPI TAPE support
 CONFIG_BLK_DEV_IDETAPE
   If you have an IDE tape drive using the ATAPI protocol, say Y.
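
The help text above only covers the build side. The sketch below shows, roughly, how a userspace setup tool would bind a CD-RW drive to a packet device once pktcdvd.o is loaded: the packet device node is opened with O_CREAT (pkt_open() later in this patch checks that flag) and the drive is attached with the PACKET_SETUP_DEV ioctl from <linux/pktcdvd.h>. The device paths and the assumption that the ioctl argument is the drive's device number follow the 2.4-era pktsetup tool and are not spelled out in this patch.

/* Hypothetical setup sketch, not part of the patch; error handling trimmed. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/pktcdvd.h>	/* PACKET_SETUP_DEV, PACKET_TEARDOWN_DEV */

int main(void)
{
	struct stat st;
	int fd;

	if (stat("/dev/sr0", &st) < 0)	/* the CD-RW drive; path is an example */
		return 1;

	/* O_CREAT is what tells pkt_open() that we want to configure the device */
	fd = open("/dev/pktcdvd0", O_RDONLY | O_CREAT, 0600);
	if (fd < 0)
		return 1;

	/* assumed encoding: the drive's device number, as old pktsetup passed it */
	if (ioctl(fd, PACKET_SETUP_DEV, (unsigned int)st.st_rdev) < 0)
		perror("PACKET_SETUP_DEV");

	close(fd);
	return 0;
}
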
diff -u -r -N ../../linus/2.4/linux/arch/sparc64/kernel/ioctl32.c linux/arch/sparc64/kernel/ioctl32.c
--- ../../linus/2.4/linux/arch/sparc64/kernel/ioctl32.c	Tue Aug  6 21:14:27 2002
+++ linux/arch/sparc64/kernel/ioctl32.c	Tue Aug  6 21:22:04 2002
@@ -90,6 +90,7 @@
 #include <linux/atm_tcp.h>
 #include <linux/sonet.h>
 #include <linux/atm_suni.h>
+#include <linux/pktcdvd.h>
 #include <linux/mtd/mtd.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -849,6 +850,41 @@
 	return ret;
 }
 
+struct packet_stats32 {
+	u32	bh_s;
+	u32	bh_e;
+	u32	bh_cache_hits;
+	u32	page_cache_hits;
+	u32	bh_w;
+	u32	bh_r;
+};
+
+static inline int pkt_getstats(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct packet_stats p;
+	struct packet_stats32 p32;
+	mm_segment_t old_fs = get_fs();
+	int ret;
+
+	ret = copy_from_user (&p32, (struct packet_stats32 *)arg, sizeof(struct packet_stats32));
+	if (ret)
+		return -EFAULT;
+#define P(x) (p.x = (unsigned long)p32.x)
+	P(bh_s);
+	P(bh_e);
+	P(bh_cache_hits);
+	P(page_cache_hits);
+	P(bh_w);
+	P(bh_r);
+#undef P
+
+	set_fs (KERNEL_DS);
+	ret = sys_ioctl (fd, cmd, (long)&p);
+	set_fs (old_fs);
+
+	return ret;
+}
+
 struct hd_geometry32 {
 	unsigned char heads;
 	unsigned char sectors;
@@ -4553,6 +4589,12 @@
 COMPATIBLE_IOCTL(RNDADDENTROPY)
 COMPATIBLE_IOCTL(RNDZAPENTCNT)
 COMPATIBLE_IOCTL(RNDCLEARPOOL)
+/* Big X, CDRW Packet Driver */
+#if defined(CONFIG_CDROM_PKTCDVD)
+COMPATIBLE_IOCTL(PACKET_SETUP_DEV)
+COMPATIBLE_IOCTL(PACKET_TEARDOWN_DEV)
+HANDLE_IOCTL(PACKET_GET_STATS, pkt_getstats)
+#endif /* CONFIG_CDROM_PKTCDVD */
 /* Bluetooth ioctls */
 COMPATIBLE_IOCTL(HCIDEVUP)
 COMPATIBLE_IOCTL(HCIDEVDOWN)
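
pkt_getstats() above is the usual sparc64 compat-ioctl marshalling pattern: the 32-bit layout (u32 fields) is widened into the native structure (unsigned long fields) before the ioctl is forwarded with a kernel-space pointer. The native struct packet_stats is defined in <linux/pktcdvd.h>, which is not part of this excerpt; judging from the P() conversions and the /proc strings later in the patch, it presumably contains at least the following fields.

/* Assumed shape of the native structure mirrored by packet_stats32 above;
 * the authoritative definition lives in <linux/pktcdvd.h>. */
struct packet_stats {
	unsigned long	bh_s;			/* buffers started */
	unsigned long	bh_e;			/* buffers ended */
	unsigned long	bh_cache_hits;		/* buffer cache hits */
	unsigned long	page_cache_hits;	/* page cache hits */
	unsigned long	bh_w;			/* not surfaced in the proc output */
	unsigned long	bh_r;
};
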
diff -u -r -N ../../linus/2.4/linux/drivers/block/Config.in linux/drivers/block/Config.in
--- ../../linus/2.4/linux/drivers/block/Config.in	Tue Aug  6 21:14:34 2002
+++ linux/drivers/block/Config.in	Tue Aug  6 21:22:08 2002
@@ -39,6 +39,11 @@
 dep_tristate 'Mylex DAC960/DAC1100 PCI RAID Controller support' CONFIG_BLK_DEV_DAC960 $CONFIG_PCI
 dep_tristate 'Micro Memory MM5415 Battery Backed RAM support' CONFIG_BLK_DEV_UMEM $CONFIG_PCI $CONFIG_EXPERIMENTAL
 
+tristate 'Packet writing on CD/DVD media' CONFIG_CDROM_PKTCDVD
+if [ "$CONFIG_CDROM_PKTCDVD" != "n" ]; then
+    bool '   Enable write caching' CONFIG_CDROM_PKTCDVD_WCACHE n
+fi
+
 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
 dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
 
diff -u -r -N ../../linus/2.4/linux/drivers/block/Makefile linux/drivers/block/Makefile
--- ../../linus/2.4/linux/drivers/block/Makefile	Tue Aug  6 21:14:34 2002
+++ linux/drivers/block/Makefile	Tue Aug  6 21:22:08 2002
@@ -31,6 +31,7 @@
 obj-$(CONFIG_BLK_DEV_DAC960)	+= DAC960.o
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
 obj-$(CONFIG_BLK_DEV_NBD)	+= nbd.o
+obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
 
 subdir-$(CONFIG_PARIDE) += paride
 
diff -u -r -N ../../linus/2.4/linux/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
--- ../../linus/2.4/linux/drivers/block/ll_rw_blk.c	Tue Aug  6 21:14:34 2002
+++ linux/drivers/block/ll_rw_blk.c	Tue Aug  6 21:22:08 2002
@@ -1046,6 +1046,7 @@
 	/* Test device size, when known. */
 	if (blk_size[major])
 		minorsize = blk_size[major][MINOR(bh->b_rdev)];
+#if 0
 	if (minorsize) {
 		unsigned long maxsector = (minorsize << 1) + 1;
 		unsigned long sector = bh->b_rsector;
@@ -1069,6 +1070,7 @@
 			return;
 		}
 	}
+#endif
 
 	/*
 	 * Resolve the mapping until finished. (drivers are
@@ -1270,8 +1272,8 @@
 
 	req->errors = 0;
 	if (!uptodate)
-		printk("end_request: I/O error, dev %s (%s), sector %lu\n",
-			kdevname(req->rq_dev), name, req->sector);
+		printk("end_request: I/O error, cmd %d dev %s (%s), sector %lu\n",
+			req->cmd, kdevname(req->rq_dev), name, req->sector);
 
 	if ((bh = req->bh) != NULL) {
 		nsect = bh->b_size >> 9;
diff -u -r -N ../../linus/2.4/linux/drivers/block/pktcdvd.c linux/drivers/block/pktcdvd.c
--- ../../linus/2.4/linux/drivers/block/pktcdvd.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/block/pktcdvd.c	Thu Aug  8 20:44:32 2002
@@ -0,0 +1,2524 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices (aka an exercise in block layer masturbation)
+ *
+ *
+ * TODO: (circa order of when I will fix it)
+ * - Only able to write on CD-RW media right now.
+ * - check host application code on media and set it in write page
+ * - Generic interface for UDF to submit large packets for variable length
+ *   packet writing
+ * - interface for UDF <-> packet to negotiate a new location when a write
+ *   fails.
+ * - handle OPC, especially for -RW media
+ *
+ * ------------------------------------------------------------------------
+ *
+ * Newer changes -- see ChangeLog
+ *
+ * 0.0.2d (26/10/2000)
+ * - (scsi) use implicit segment recounting for all hba's
+ * - fix speed setting, was consistently off on most drives
+ * - only print capacity when opening for write
+ * - fix off-by-two error in getting/setting write+read speed (affected
+ *   reporting as well as actual speed used)
+ * - possible to enable write caching on drive
+ * - do ioctl marshalling on sparc64 from Ben Collins <bcollins@debian.org>
+ * - avoid unaligned access on flags, should have been unsigned long of course
+ * - fixed missed wakeup in kpacketd
+ * - b_dev error (two places)
+ * - fix buffer head b_count bugs
+ * - fix hole merge bug, where tail could be added twice
+ * - fsync and invalidate buffers on close
+ * - check hash table for buffers first before using our own
+ * - add read-ahead
+ * - fixed several list races
+ * - fix proc reporting for more than one device
+ * - change to O_CREAT for creating devices
+ * - added media_change hook
+ * - added free buffers config option
+ * - pkt_lock_tray fails on failed open (and oopses), remove it. unlock
+ *   is done explicitly in pkt_remove dev anyway.
+ * - added proper elevator insertion (should probably be part of elevator.c)
+ * - moved kernel thread info to private device, spawn one for each writer
+ * - added separate buffer list for dirty packet buffers
+ * - fixed nasty data corruption bug
+ * - remember to account request even when we don't gather data for it
+ * - add ioctl to force wakeup of kernel thread (for debug)
+ * - fixed packet size setting bug on zero detected
+ * - changed a lot of the proc reporting to be more readable to "humans"
+ * - set full speed for read-only opens
+ *
+ * 0.0.2c (08/09/2000)
+ * - inc usage count of buffer heads
+ * - add internal buffer pool to avoid deadlock on oom
+ * - gather data for as many buffers as we have, before initiating write. this
+ *   allows the laser to stay on longer, giving better performance.
+ * - fix always busy when tray can't be locked
+ * - remove request duplication nastiness, inject directly into the target
+ * - adapted to devfs and elevator changes
+ * - added proc interface
+ *
+ * 0.0.2b (21/06/2000)
+ * - fix io_request_lock typos (missing '&')
+ * - grab pkt_sem before invoking pkt_handle_queue
+ * - SCSI uses queuedata too, mirror that in pd->queuedata (hack)
+ * - remove SCSI sr debug messages
+ * - really activate empty block querying (requires cvs UDF, CDRW branch)
+ * - make sure sync_buffers doesn't consider us, or we can deadlock
+ * - make sure people don't swap on us (for now ;)
+ *
+ * 0.0.2a (19/06/2000)
+ * - add kpacketd kernel thread to handle actual data gathering
+ * - pd->pkt_dev is now real device, not just minor
+ * - add support for super_operations block_empty fn, to query fs for
+ *   unused blocks that don't need reading
+ * - "cache" blocks that are contained in the UDF file/dir packet
+ * - rewrite pkt_gather_data to a one-step solution
+ * - add private pktcdvd elevator
+ * - shutdown write access to device upon write failure
+ * - fix off-by-one bug in capacity
+ * - setup sourceforge project (packet-cd.sourceforge.net)
+ * - add more blk ioctls to pkt_ioctl
+ * - set inactive request queue head
+ * - change panic calls to BUG, better with kdb
+ * - have pkt_gather_data check correct block size and kill rq if wrong
+ * - rework locking
+ * - introduce per-pd queues, simplifies pkt_request
+ * - store pd in queuedata
+ *
+ *************************************************************************/
+
+#define VERSION_CODE	"v0.0.2p 03/03/2002 Jens Axboe (axboe@suse.de)"
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/locks.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/file.h>
+#include <linux/blk.h>
+#include <linux/blkpg.h>
+#include <linux/cdrom.h>
+#include <linux/ide.h>
+#include <linux/smp_lock.h>
+#include <linux/pktcdvd.h>
+#include <linux/kernel_stat.h>
+#include <linux/sysrq.h>
+
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+
+/*
+ * remove for next version -- for now, disable the mentioned option in the
+ * SCSI section
+ */
+#if defined(CONFIG_SCSI_DEBUG_QUEUES)
+#error "Don't compile with 'Enable extra checks in new queueing code' enabled"
+#endif
+
+#define SCSI_IOCTL_SEND_COMMAND	1
+
+/*
+ * 32 buffers of 2048 bytes
+ */
+#define PACKET_MAX_SIZE		32
+
+#define NEXT_BH(bh, nbh)	\
+	 (((bh)->b_rsector + ((bh)->b_size >> 9)) == (nbh)->b_rsector)
+
+#define BH_IN_ORDER(b1, b2)	\
+	((b1)->b_rsector < (b2)->b_rsector)
+
+#define CONTIG_BH(b1, b2)	\
+	((b1)->b_data + (b1)->b_size == (b2)->b_data)
+
+#define ZONE(sector, pd)	\
+	(((sector) + ((pd)->offset)) - (((sector) + ((pd)->offset)) & (((pd)->settings.size - 1))))
+
+static int *pkt_sizes;
+static int *pkt_blksize;
+static int *pkt_readahead;
+static struct pktcdvd_device *pkt_devs;
+static struct proc_dir_entry *pkt_proc;
+static DECLARE_WAIT_QUEUE_HEAD(pd_bh_wait);
+
+/*
+ * a bit of a kludge, but we want to be able to pass both real and packet
+ * dev and get the right one.
+ */
+static inline struct pktcdvd_device *pkt_find_dev(kdev_t dev)
+{
+	int i;
+
+	for (i = 0; i < MAX_WRITERS; i++)
+		if (pkt_devs[i].dev == dev || pkt_devs[i].pkt_dev == dev)
+			return &pkt_devs[i];
+
+	return NULL;
+}
+
+/*
+ * The following functions are the plugins to the ll_rw_blk
+ * layer and decides whether a given request / buffer head can be
+ * merged. We differ in a couple of ways from "normal" block
+ * devices:
+ *
+ * - don't merge when the buffer / request crosses a packet block
+ *   boundary
+ * - merge buffer head even though it can't be added directly to the
+ *   front or back of the list. this gives us better performance, since
+ *   what would otherwise require multiple requests can now be handled
+ *   in one (hole merging)
+ * - at this point it's just writes, reads have already been remapped
+ *
+ * The device's original merge_ functions are stored in the packet device
+ * queue (pd->q)
+ *
+ */
+static inline int pkt_do_merge(request_queue_t *q, struct request *rq,
+			       struct buffer_head *bh, int max_segs,
+			       merge_request_fn *merge_fn,
+			       struct pktcdvd_device *pd)
+{
+	void *ptr = q->queuedata;
+	int ret;
+
+	if (rq->cmd != WRITE)
+		BUG();
+
+	if (ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd))
+		return ELEVATOR_NO_MERGE;
+
+	/*
+	 * NOTE: this is done under the io_request_lock/queue_lock, hence
+	 * it is safe
+	 */
+	q->queuedata = pd->cdrw.queuedata;
+	ret = merge_fn(q, rq, bh, max_segs);
+	q->queuedata = ptr;
+	return ret;
+}
+
+static int pkt_front_merge_fn(request_queue_t *q, struct request *rq,
+			      struct buffer_head *bh, int max_segs)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
+
+	return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.front_merge_fn, pd);
+}
+
+static int pkt_back_merge_fn(request_queue_t *q, struct request *rq,
+			     struct buffer_head *bh, int max_segs)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
+
+	return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.back_merge_fn, pd);
+}
+
+/*
+ * rules similar to above
+ */
+static int pkt_merge_requests_fn(request_queue_t *q, struct request *rq,
+				 struct request *nxt, int max_segs)
+{
+	struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
+	struct packet_cdrw *cdrw = &pd->cdrw;
+	void *ptr = q->queuedata;
+	int ret;
+
+	if (ZONE(rq->sector, pd) != ZONE(nxt->sector + nxt->nr_sectors - 1, pd))
+		return 0;
+
+	q->queuedata = cdrw->queuedata;
+	ret = cdrw->merge_requests_fn(q, rq, nxt, max_segs);
+	q->queuedata = ptr;
+	return ret;
+}
+
+static int pkt_grow_bhlist(struct pktcdvd_device *pd, int count)
+{
+	struct packet_cdrw *cdrw = &pd->cdrw;
+	struct buffer_head *bh;
+	int i = 0;
+
+	VPRINTK("grow_bhlist: count=%d\n", count);
+
+	while (i < count) {
+		bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL);
+		if (!bh)
+			break;
+
+		bh->b_data = kmalloc(CD_FRAMESIZE, GFP_KERNEL);
+		if (!bh->b_data) {
+			kmem_cache_free(bh_cachep, bh);
+			break;
+		}
+		bh->b_page = virt_to_page(bh->b_data);
+
+		spin_lock_irq(&pd->lock);
+		bh->b_pprev = &cdrw->bhlist;
+		bh->b_next = cdrw->bhlist;
+		cdrw->bhlist = bh;
+		spin_unlock_irq(&pd->lock);
+
+		bh->b_size = CD_FRAMESIZE;
+		bh->b_list = PKT_BUF_LIST;
+		atomic_inc(&cdrw->free_bh);
+		i++;
+	}
+
+	return i;
+}
+
+static int pkt_shrink_bhlist(struct pktcdvd_device *pd, int count)
+{
+	struct packet_cdrw *cdrw = &pd->cdrw;
+	struct buffer_head *bh;
+	int i = 0;
+
+	VPRINTK("shrink_bhlist: count=%d\n", count);
+
+	while ((i < count) && cdrw->bhlist) {
+		spin_lock_irq(&pd->lock);
+		bh = cdrw->bhlist;
+		cdrw->bhlist = bh->b_next;
+		spin_unlock_irq(&pd->lock);
+		if (bh->b_list != PKT_BUF_LIST)
+			BUG();
+		kfree(bh->b_data);
+		kmem_cache_free(bh_cachep, bh);
+		atomic_dec(&cdrw->free_bh);
+		i++;
+	}
+
+	return i;
+}
+
+/*
+ * These functions manage a simple pool of buffer_heads.
+ */
+static struct buffer_head *pkt_get_stacked_bh(struct pktcdvd_device *pd)
+{
+	unsigned long flags;
+	struct buffer_head *bh;
+
+	spin_lock_irqsave(&pd->lock, flags);
+	bh = pd->stacked_bhlist;
+	if (bh) {
+		pd->stacked_bhlist = bh->b_next;
+		bh->b_next = NULL;
+		pd->stacked_bhcnt--;
+		BUG_ON(pd->stacked_bhcnt < 0);
+	}
+	spin_unlock_irqrestore(&pd->lock, flags);
+
+	return bh;
+}
+
+static void pkt_put_stacked_bh(struct pktcdvd_device *pd, struct buffer_head *bh)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->lock, flags);
+	if (pd->stacked_bhcnt < STACKED_BH_POOL_SIZE) {
+		bh->b_next = pd->stacked_bhlist;
+		pd->stacked_bhlist = bh;
+		pd->stacked_bhcnt++;
+		bh = NULL;
+	}
+	spin_unlock_irqrestore(&pd->lock, flags);
+	if (bh) {
+		kmem_cache_free(bh_cachep, bh);
+	}
+}
+
+static void pkt_shrink_stacked_bhlist(struct pktcdvd_device *pd)
+{
+	struct buffer_head *bh;
+
+	while ((bh = pkt_get_stacked_bh(pd)) != NULL) {
+		kmem_cache_free(bh_cachep, bh);
+	}
+}
+
+static int pkt_grow_stacked_bhlist(struct pktcdvd_device *pd)
+{
+	struct buffer_head *bh;
+	int i;
+
+	for (i = 0; i < STACKED_BH_POOL_SIZE; i++) {
+		bh = kmem_cache_alloc(bh_cachep, GFP_KERNEL);
+		if (!bh) {
+			pkt_shrink_stacked_bhlist(pd);
+			return 0;
+		}
+		pkt_put_stacked_bh(pd, bh);
+	}
+	return 1;
+}
+
+
+static request_queue_t *pkt_get_queue(kdev_t dev)
+{
+	struct pktcdvd_device *pd = pkt_find_dev(dev);
+	if (!pd)
+		return NULL;
+	return &pd->cdrw.r_queue;
+}
+
+static void pkt_put_buffer(struct buffer_head *bh)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_dev)];
+	unsigned long flags;
+
+	if (bh->b_list != PKT_BUF_LIST)
+		return;
+
+	bh->b_state = 0;
+	bh->b_reqnext = NULL;
+	bh->b_end_io = NULL;
+
+	spin_lock_irqsave(&pd->lock, flags);
+	bh->b_next = pd->cdrw.bhlist;
+	pd->cdrw.bhlist = bh;
+	spin_unlock_irqrestore(&pd->lock, flags);
+	atomic_inc(&pd->cdrw.free_bh);
+}
+
+static inline void __pkt_inject_request(request_queue_t *q, struct request *rq)
+{
+	struct list_head *head =  &q->queue_head;
+
+	VPRINTK("__pkt_inject_request: list_empty == %d, size=%d, cmd=%d\n",
+		list_empty(&q->queue_head), rq->bh->b_size >> 9, rq->cmd);
+
+	if (list_empty(&q->queue_head))
+		q->plug_device_fn(q, rq->rq_dev);
+
+	list_add_tail(&rq->queue, head);
+}
+
+static void pkt_inject_request(request_queue_t *q, struct request *rq)
+{
+	spin_lock_irq(&io_request_lock);
+	__pkt_inject_request(q, rq);
+	spin_unlock_irq(&io_request_lock);
+}
+
+static inline void __pkt_end_request(struct pktcdvd_device *pd)
+{
+	pd->rq = NULL;
+	clear_bit(PACKET_RQ, &pd->flags);
+	clear_bit(PACKET_BUSY, &pd->flags);
+}
+
+/*
+ * io_request_lock must be held and interrupts disabled
+ */
+static void pkt_end_request(struct pktcdvd_device *pd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->lock, flags);
+	__pkt_end_request(pd);
+	spin_unlock_irqrestore(&pd->lock, flags);
+}
+
+
+static inline void __pkt_kill_request(struct request *rq, int uptodate, char *name)
+{
+	struct buffer_head *bh = rq->bh, *nbh;
+
+	while (bh) {
+		nbh = bh->b_reqnext;
+		bh->b_reqnext = NULL;
+
+		if (bh->b_end_io) {
+			bh->b_end_io(bh, uptodate);
+		} else {
+			mark_buffer_clean(bh);
+			mark_buffer_uptodate(bh, uptodate);
+			unlock_buffer(bh);
+		}
+
+		bh = nbh;
+	}
+
+	end_that_request_last(rq);
+}
+
+
+void pkt_kill_request(struct pktcdvd_device *pd, struct request *rq, int ok)
+{
+	printk("pktcdvd: killing request\n");
+	spin_lock_irq(&io_request_lock);
+	__pkt_kill_request(rq, ok, pd->name);
+	spin_unlock_irq(&io_request_lock);
+	pkt_end_request(pd);
+}
+
+static void pkt_end_io_read(struct buffer_head *bh, int uptodate)
+{
+	if (!uptodate) {
+		/* Obviously not correct, but it avoids locking up the kernel */
+		printk("Ignoring read error on sector:%ld\n", bh->b_rsector);
+		uptodate = 1;
+	}
+
+	mark_buffer_uptodate(bh, uptodate);
+	unlock_buffer(bh);
+}
+
+/*
+ * if the buffer is already in the buffer cache, grab it if we can lock
+ * it down
+ */
+static struct buffer_head *pkt_get_hash(kdev_t dev, unsigned long block, int size)
+{
+	struct buffer_head *bh = NULL;
+
+	bh = get_hash_table(dev, block, size);
+	if (bh) {
+		if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+			brelse(bh);
+			if (atomic_set_buffer_clean(bh))
+				refile_buffer(bh);
+			SetPageReferenced(bh->b_page);
+		} else {
+			brelse(bh);
+			bh = NULL;
+		}
+	}
+
+	return bh;
+}
+
+static inline struct buffer_head *__pkt_get_buffer(struct pktcdvd_device *pd,
+						   unsigned long sector)
+{
+	struct buffer_head *bh;
+
+	if (!atomic_read(&pd->cdrw.free_bh))
+		BUG();
+
+	atomic_dec(&pd->cdrw.free_bh);
+
+	spin_lock_irq(&pd->lock);
+	bh = pd->cdrw.bhlist;
+	pd->cdrw.bhlist = bh->b_next;
+	bh->b_next = NULL;
+	spin_unlock_irq(&pd->lock);
+
+	bh->b_next_free = NULL;
+	bh->b_prev_free = NULL;
+	bh->b_this_page = NULL;
+	bh->b_pprev = NULL;
+	bh->b_reqnext = NULL;
+
+	init_waitqueue_head(&bh->b_wait);
+	atomic_set(&bh->b_count, 1);
+	bh->b_list = PKT_BUF_LIST;
+	bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req);
+	bh->b_dev = pd->pkt_dev;
+
+	return bh;
+}
+
+static void pkt_end_io_write(struct buffer_head *, int);
+
+static struct buffer_head *pkt_get_buffer(struct pktcdvd_device *pd,
+					  unsigned long sector, int size)
+{
+	unsigned long block = sector / (size >> 9);
+	struct buffer_head *bh;
+
+	VPRINTK("get_buffer: sector=%ld, size=%d\n", sector, size);
+
+	bh = pkt_get_hash(pd->pkt_dev, block, size);
+	if (bh)
+		pd->stats.bh_cache_hits += (size >> 9);
+	else
+		bh = __pkt_get_buffer(pd, sector);
+
+	blk_started_io(bh->b_size >> 9);
+	bh->b_blocknr = block;
+	bh->b_end_io = pkt_end_io_write;
+	bh->b_rsector = sector;
+	bh->b_rdev = pd->dev;
+	return bh;
+}
+
+/*
+ * this rq is done -- io_request_lock must be held and interrupts disabled
+ */
+static void pkt_rq_end_io(struct pktcdvd_device *pd)
+{
+	unsigned long flags;
+
+	VPRINTK("pkt_rq_end_io: rq=%p, cmd=%d, q=%p\n", pd->rq, pd->rq->cmd, pd->rq->q);
+
+	spin_lock_irqsave(&pd->lock, flags);
+
+	/*
+	 * debug checks
+	 */
+	if (!test_bit(PACKET_RQ, &pd->flags))
+		printk("pktcdvd: rq_end_io: RQ not set\n");
+	if (!test_bit(PACKET_BUSY, &pd->flags))
+		printk("pktcdvd: rq_end_io: BUSY not set\n");
+
+	__pkt_end_request(pd);
+	wake_up(&pd->wqueue);
+	spin_unlock_irqrestore(&pd->lock, flags);
+}
+
+static inline void pkt_mark_readonly(struct pktcdvd_device *pd, int on)
+{
+	if (on)
+		set_bit(PACKET_READONLY, &pd->flags);
+	else
+		clear_bit(PACKET_READONLY, &pd->flags);
+}
+
+static inline void __pkt_end_io_write(struct pktcdvd_device *pd,
+				      struct buffer_head *bh, int uptodate)
+{
+	VPRINTK("end_io_write: bh=%ld, uptodate=%d\n", bh->b_blocknr, uptodate);
+
+	/*
+	 * general Linux bug, no one should clear the BH_Uptodate flag for
+	 * a failed write...
+	 */
+	if (uptodate)
+		mark_buffer_uptodate(bh, uptodate);
+	else {
+		printk("pktcdvd: %s: WRITE error sector %lu\n", pd->name, bh->b_rsector);
+#if 0
+		set_bit(PACKET_RECOVERY, &pd->flags);
+		wake_up(&pd->wqueue);
+#endif
+	}
+
+	pd->stats.bh_e++;
+
+	atomic_dec(&pd->wrqcnt);
+	if (atomic_read(&pd->wrqcnt) == 0) {
+		pkt_rq_end_io(pd);
+	}
+
+	unlock_buffer(bh);
+}
+
+/*
+ * we use this as our default b_end_io handler, since we need to take
+ * the entire request off the list if just one of the clusters fail.
+ * later on this should also talk to UDF about relocating blocks -- for
+ * now we just drop the rq entirely. when doing the relocating we must also
+ * lock the bh down to ensure that we can easily reconstruct the write should
+ * it fail.
+ */
+static void pkt_end_io_write(struct buffer_head *bh, int uptodate)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
+
+	__pkt_end_io_write(pd, bh, uptodate);
+	pkt_put_buffer(bh);
+}
+
+static void pkt_end_io_write_stacked(struct buffer_head *bh, int uptodate)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
+	struct buffer_head *rbh = bh->b_private;
+
+	__pkt_end_io_write(pd, bh, uptodate);
+	rbh->b_end_io(rbh, uptodate);
+	pkt_put_stacked_bh(pd, bh);
+	wake_up(&pd_bh_wait);
+}
+
+static int pkt_init_rq(struct pktcdvd_device *pd, struct request *rq)
+{
+	struct buffer_head *bh;
+	unsigned int cnt, nr_segments;
+
+	cnt = 0;
+	nr_segments = 1;
+	bh = rq->bh;
+	while (bh) {
+		struct buffer_head *nbh = bh->b_reqnext;
+
+		bh->b_rdev = pd->pkt_dev;
+
+		/*
+		 * the buffer better be uptodate, mapped, and locked!
+		 */
+		if (!buffer_uptodate(bh)) {
+			printk("%lu not uptodate\n", bh->b_rsector);
+			/*
+			 * It is not really the pktcdvd driver's problem if
+			 * someone wants to write stale data.
+			 */
+		}
+
+		if (!buffer_locked(bh) || !buffer_mapped(bh)) {
+			printk("%lu, state %lx\n", bh->b_rsector, bh->b_state);
+			BUG();
+		}
+
+		if (nbh) {
+			if (!CONTIG_BH(bh, nbh))
+				nr_segments++;
+
+			/*
+			 * if this happens, do report
+			 */
+			if ((bh->b_rsector + (bh->b_size >> 9))!=nbh->b_rsector) {
+				printk("%lu (%p)-> %lu (%p) (%lu in all)\n",
+				bh->b_rsector, bh, nbh->b_rsector, nbh,
+				rq->nr_sectors);
+				return 1;
+			}
+		}
+
+		cnt += bh->b_size >> 9;
+		bh = nbh;
+	}
+
+	rq->nr_segments = rq->nr_hw_segments = nr_segments;
+
+	if (cnt != rq->nr_sectors) {
+		printk("botched request %u (%lu)\n", cnt, rq->nr_sectors);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * really crude stats for now...
+ */
+static void pkt_account_rq(struct pktcdvd_device *pd, int read, int written,
+			   int bs)
+{
+	pd->stats.bh_s += (written / bs);
+	pd->stats.secs_w += written;
+	pd->stats.secs_r += read;
+}
+
+/*
+ * does request span two packets? 0 == yes, 1 == no
+ */
+static int pkt_one_zone(struct pktcdvd_device *pd, struct request *rq)
+{
+	if (!pd->settings.size)
+		return 0;
+
+	if (!(rq->cmd & WRITE))
+		return 1;
+
+	return ZONE(rq->sector, pd) == ZONE(rq->sector + rq->nr_sectors -1, pd);
+}
+
+#if defined(CONFIG_CDROM_PKTCDVD_BEMPTY)
+static void pkt_init_buffer(struct buffer_head *bh)
+{
+	set_bit(BH_Uptodate, &bh->b_state);
+	set_bit(BH_Dirty, &bh->b_state);
+	memset(bh->b_data, 0, bh->b_size);
+}
+
+static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
+{
+	struct super_block *sb;
+	struct super_operations *sop;
+	unsigned long packet;
+	int ret;
+
+	ret = 0;
+	if ((sb = get_super(pd->pkt_dev)) == NULL)
+		goto out;
+	if ((sop = sb->s_op) == NULL)
+		goto out;
+	if (sop->block_empty == NULL)
+		goto out;
+
+	packet = 0;
+	if (sop->block_empty(sb, bh->b_blocknr, &packet))  {
+		pkt_init_buffer(bh);
+		ret = 1;
+	}
+
+out:
+	return ret;
+}
+
+#else /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */
+
+static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
+{
+	return 0;
+}
+
+#endif /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */
+
+static int pkt_flush_cache(struct pktcdvd_device *pd);
+
+static void pkt_flush_writes(struct pktcdvd_device *pd)
+{
+	if (pd->unflushed_writes) {
+		pd->unflushed_writes = 0;
+		pkt_flush_cache(pd);
+	}
+}
+
+/*
+ * basically just does an ll_rw_block for the bhs given to us, but we
+ * don't return until we have them.
+ */
+static void pkt_read_bh(struct pktcdvd_device *pd, struct buffer_head *bh)
+{
+	/*
+	 * UDF says it's empty, woohoo
+	 */
+	if (pkt_sb_empty(pd, bh))
+		return;
+
+	down(&pd->cache_sync_mutex);
+	pkt_flush_writes(pd);
+	generic_make_request(READ, bh);
+	up(&pd->cache_sync_mutex);
+}
+
+static int pkt_index_bhs(struct buffer_head **bhs)
+{
+	struct buffer_head *bh;
+	int index;
+	int error = 0;
+
+	/*
+	 * now finish pending reads and connect the chain of buffers
+	 */
+	index = 0;
+	while (index < PACKET_MAX_SIZE) {
+		bh = bhs[index];
+
+		/*
+		 * pin down private buffers (ie, force I/O to complete)
+		 */
+		if (bh->b_end_io == pkt_end_io_read) {
+			lock_buffer(bh);
+			bh->b_end_io = pkt_end_io_write;
+		}
+
+		if (!buffer_locked(bh))
+			BUG();
+
+		if (!buffer_uptodate(bh)) {
+			printk("pktcdvd: read failure (%s, sec %lu)\n",
+				kdevname(bh->b_rdev), bh->b_rsector);
+			error = 1;
+		}
+
+		/*
+		 * attach previous
+		 */
+		if (index) {
+			struct buffer_head *pbh = bhs[index - 1];
+
+			if ((pbh->b_rsector + (pbh->b_size >> 9)) != bh->b_rsector) {
+				printk("%lu -> %lu\n", pbh->b_rsector, bh->b_rsector);
+				index = 0;
+				break;
+			}
+			pbh->b_reqnext = bh;
+		}
+		index++;
+	}
+
+	if (error)
+		return 0;
+
+	if (index) {
+		index--;
+		bhs[index]->b_reqnext = NULL;
+	}
+
+	return index;
+}
+
+/*
+ * fill in the holes of a request
+ *
+ * Returns: 0, keep 'em coming -- 1, stop queueing
+ */
+static int pkt_gather_data(struct pktcdvd_device *pd, struct request *rq)
+{
+	unsigned long start_s, end_s, sector;
+	struct buffer_head *bh;
+	unsigned int sectors, index;
+	struct buffer_head *bhs[PACKET_MAX_SIZE];
+
+	memset(bhs, 0, sizeof(bhs));
+
+	/*
+	 * all calculations are done with 512 byte sectors
+	 */
+	sectors = pd->settings.size - rq->nr_sectors;
+	start_s = rq->sector - (rq->sector & (pd->settings.size - 1));
+	end_s = start_s + pd->settings.size;
+
+	VPRINTK("pkt_gather_data: cmd=%d\n", rq->cmd);
+	VPRINTK("need %d sectors for %s\n", sectors, kdevname(pd->dev));
+	VPRINTK("from %lu to %lu ", start_s, end_s);
+	VPRINTK("(%lu - %lu)\n", rq->bh->b_rsector, rq->bhtail->b_rsector +
+				 rq->current_nr_sectors);
+
+	/*
+	 * first fill-out map of the buffers we have
+	 */
+	bh = rq->bh;
+	while (bh) {
+		index = (bh->b_rsector & (pd->settings.size - 1)) / (bh->b_size >> 9);
+
+		bhs[index] = bh;
+		bh = bh->b_reqnext;
+
+		/*
+		 * make sure to detach from list!
+		 */
+		bhs[index]->b_reqnext = NULL;
+	}
+
+	/*
+	 * now get buffers for missing blocks, and schedule reads for them
+	 */
+	for (index = 0, sector = start_s; sector < end_s; index++) {
+		if (bhs[index]) {
+			bh = bhs[index];
+			goto next;
+		}
+
+		bh = pkt_get_buffer(pd, sector, CD_FRAMESIZE);
+
+		bhs[index] = bh;
+		rq->nr_sectors += bh->b_size >> 9;
+		rq->nr_segments++;
+
+		if (!buffer_uptodate(bh)) {
+			bh->b_end_io = pkt_end_io_read;
+			pkt_read_bh(pd, bh);
+		}
+
+	next:
+		sector += bh->b_size >> 9;
+	}
+
+	index = pkt_index_bhs(bhs);
+#if 0
+	if (!index)
+		goto kill_it;
+#endif
+
+	rq->bh = bhs[0];
+	rq->bhtail = bhs[index];
+	rq->buffer = rq->bh->b_data;
+	rq->current_nr_sectors = rq->bh->b_size >> 9;
+	rq->hard_nr_sectors = rq->nr_sectors;
+	rq->sector = rq->hard_sector = start_s;
+
+	VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector);
+	if (pkt_init_rq(pd, rq)) {
+		for (index = 0; index < PACKET_MAX_SIZE; index++) {
+			bh = bhs[index];
+			printk("[%d] %lu %d (%p -> %p)\n", index, bh->b_rsector,
+					bh->b_size, bh, bh->b_reqnext);
+		}
+		goto kill_it;
+	}
+
+	pkt_account_rq(pd, sectors, rq->nr_sectors, rq->current_nr_sectors);
+
+	/*
+	 * sanity check
+	 */
+	if (rq->nr_sectors != pd->settings.size) {
+		printk("pktcdvd: request mismatch %lu (should be %u)\n",
+					rq->nr_sectors, pd->settings.size);
+		BUG();
+	}
+
+	return 0;
+
+	/*
+	 * for now, just kill entire request and hope for the best...
+	 */
+kill_it:
+	for (index = 0; index < PACKET_MAX_SIZE; index++) {
+		bh = bhs[index];
+		buffer_IO_error(bh);
+		if (bh->b_list == PKT_BUF_LIST)
+			pkt_put_buffer(bh);
+	}
+	end_that_request_last(pd->rq);
+	return 1;
+}
+
+/*
+ * Returns: 1, keep 'em coming -- 0, wait for wakeup
+ */
+static int pkt_do_request(struct pktcdvd_device *pd, struct request *rq)
+{
+	VPRINTK("do_request: bh=%ld, nr_sectors=%ld, size=%d, cmd=%d\n", rq->bh->b_blocknr, rq->nr_sectors, pd->settings.size, rq->cmd);
+
+	/*
+	 * perfect match. the merge_* functions have already made sure that
+	 * a request doesn't cross a packet boundary, so if the sector
+	 * count matches it's good.
+	 */
+	if (rq->nr_sectors == pd->settings.size) {
+		if (pkt_init_rq(pd, rq)) {
+			pkt_kill_request(pd, rq, 0);
+			return 1;
+		}
+
+		pkt_account_rq(pd, 0, rq->nr_sectors, rq->current_nr_sectors);
+		return 0;
+	}
+
+	/*
+	 * paranoia...
+	 */
+	if (rq->nr_sectors > pd->settings.size) {
+		printk("pktcdvd: request too big! BUG! %lu\n", rq->nr_sectors);
+		BUG();
+	}
+
+	return pkt_gather_data(pd, rq);
+}
+
+/*
+ * recover a failed write, query for relocation if possible
+ */
+static int pkt_start_recovery(struct pktcdvd_device *pd, struct request *rq)
+{
+	struct super_block *sb = get_super(pd->pkt_dev);
+	struct buffer_head *bhs[PACKET_MAX_SIZE], *bh, *obh;
+	unsigned long old_block, new_block, sector;
+	int i, sectors;
+
+	if (!sb || !sb->s_op || !sb->s_op->relocate_blocks)
+		goto fail;
+
+	old_block = (rq->sector & ~(pd->settings.size - 1)) / (rq->bh->b_size >> 9);
+	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
+		goto fail;
+
+	memset(bhs, 0, sizeof(bhs));
+	bh = rq->bh;
+	while (bh) {
+		i = (bh->b_rsector & (pd->settings.size - 1)) / (bh->b_size >> 9);
+
+		bhs[i] = bh;
+		bh = bh->b_reqnext;
+		bhs[i]->b_reqnext = NULL;
+	}
+
+	sectors = 0;
+	sector = new_block * (rq->bh->b_size >> 9);
+	for (i = 0; i < PACKET_MAX_SIZE; i++) {
+		bh = bhs[i];
+
+		/*
+		 * three cases -->
+		 *	1) bh is not there at all
+		 *	2) bh is there and not ours, get a new one and
+		 *	   invalidate this block for the future
+		 *	3) bh is there and ours, just change the sector
+		 */
+		if (!bh) {
+			obh = pkt_get_hash(pd->pkt_dev, new_block,CD_FRAMESIZE);
+			bh = __pkt_get_buffer(pd, sector);
+			if (obh) {
+				if (buffer_uptodate(obh)) {
+					memcpy(bh->b_data, obh->b_data, obh->b_size);
+					set_bit(BH_Uptodate, &bh->b_state);
+				}
+				unlock_buffer(obh);
+				bforget(obh);
+			}
+			bhs[i] = bh;
+		} else if (bh->b_list != PKT_BUF_LIST) {
+			bhs[i] = pkt_get_buffer(pd, sector, CD_FRAMESIZE);
+			memcpy(bhs[i]->b_data, bh->b_data, CD_FRAMESIZE);
+			unlock_buffer(bh);
+			bforget(bh);
+			bh = bhs[i];
+			set_bit(BH_Uptodate, &bh->b_state);
+		} else {
+			bh->b_rsector = sector;
+			bh->b_blocknr = new_block;
+		}
+
+		sector += (bh->b_size >> 9);
+		new_block++;
+		sectors +=  (bh->b_size >> 9);
+	}
+
+	i = pkt_index_bhs(bhs);
+	if (!i)
+		goto fail;
+
+	rq->bh = bhs[0];
+	rq->bhtail = bhs[i];
+	rq->buffer = rq->bh->b_data;
+	rq->current_nr_sectors = rq->bh->b_size >> 9;
+	rq->hard_nr_sectors = rq->nr_sectors = sectors;
+	rq->sector = rq->hard_sector = rq->bh->b_rsector;
+	rq->errors = 0;
+	clear_bit(PACKET_RECOVERY, &pd->flags);
+	clear_bit(PACKET_BUSY, &pd->flags);
+	return 0;
+
+fail:
+	printk("pktcdvd: rq recovery not possible\n");
+	pkt_kill_request(pd, rq, 0);
+	clear_bit(PACKET_RECOVERY, &pd->flags);
+	return 1;
+}
+
+/*
+ * handle the requests that got queued for this writer
+ *
+ * returns 0 for busy (already doing something), or 1 for queue new one
+ *
+ */
+static int pkt_handle_queue(struct pktcdvd_device *pd, request_queue_t *q)
+{
+	struct request *rq;
+	int ret;
+
+	VPRINTK("handle_queue\n");
+
+	/*
+	 * nothing for us to do
+	 */
+	if (!test_bit(PACKET_RQ, &pd->flags))
+		return 1;
+
+	spin_lock_irq(&pd->lock);
+	rq = pd->rq;
+	spin_unlock_irq(&pd->lock);
+
+	if (test_bit(PACKET_RECOVERY, &pd->flags))
+		if (pkt_start_recovery(pd, rq))
+			return 1;
+
+	/*
+	 * already being processed
+	 */
+	if (test_and_set_bit(PACKET_BUSY, &pd->flags))
+		return 0;
+
+	/*
+	 * nothing to do
+	 */
+	ret = 1;
+	if (rq == NULL) {
+		printk("handle_queue: pd BUSY+RQ, but no rq\n");
+		clear_bit(PACKET_RQ, &pd->flags);
+		goto out;
+	}
+
+	/*
+	 * reads are shipped directly to cd-rom, so they should not
+	 * pop up here
+	 */
+	if (rq->cmd == READ)
+		BUG();
+
+	if ((rq->current_nr_sectors << 9) != CD_FRAMESIZE) {
+		pkt_kill_request(pd, rq, 0);
+		goto out;
+	}
+
+	if (!pkt_do_request(pd, rq)) {
+		atomic_add(PACKET_MAX_SIZE, &pd->wrqcnt);
+		down(&pd->cache_sync_mutex);
+		pkt_inject_request(q, rq);
+		pd->unflushed_writes = 1;
+		up(&pd->cache_sync_mutex);
+		return 0;
+	}
+
+out:
+	clear_bit(PACKET_BUSY, &pd->flags);
+	return ret;
+}
+
+/*
+ * kcdrwd is woken up when writes have been queued for one of our
+ * registered devices
+ */
+static int kcdrwd(void *foobar)
+{
+	struct pktcdvd_device *pd = foobar;
+	request_queue_t *q, *my_queue;
+
+	/*
+	 * exit_files, mm (move to lazy-tlb, so context switches become
+	 * extremely cheap) etc
+	 */
+	daemonize();
+
+	current->policy = SCHED_OTHER;
+	current->static_prio = -20;
+	sprintf(current->comm, pd->name);
+
+	spin_lock_irq(&current->sigmask_lock);
+	siginitsetinv(&current->blocked, sigmask(SIGKILL));
+	flush_signals(current);
+	spin_unlock_irq(&current->sigmask_lock);
+
+	q = blk_get_queue(pd->dev);
+	my_queue = blk_get_queue(pd->pkt_dev);
+
+	for (;;) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		add_wait_queue(&pd->wqueue, &wait);
+
+		/*
+		 * if PACKET_BUSY is cleared, we can queue
+		 * another request. otherwise we need to unplug the
+		 * cd-rom queue and wait for buffers to be flushed
+		 * (which will then wake us up again when done).
+		 */
+		do {
+			pkt_handle_queue(pd, q);
+
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			if (test_bit(PACKET_BUSY, &pd->flags))
+				break;
+
+			spin_lock_irq(&io_request_lock);
+			if (list_empty(&my_queue->queue_head)) {
+				spin_unlock_irq(&io_request_lock);
+				break;
+			}
+			set_current_state(TASK_RUNNING);
+
+			my_queue->request_fn(my_queue);
+			spin_unlock_irq(&io_request_lock);
+		} while (1);
+
+		generic_unplug_device(q);
+
+		schedule();
+		remove_wait_queue(&pd->wqueue, &wait);
+
+		/*
+		 * got SIGKILL
+		 */
+		if (signal_pending(current))
+			break;
+
+	}
+
+	complete_and_exit(&pd->cdrw.thr_compl, 0);
+	return 0;
+}
+
+static void pkt_attempt_remerge(struct pktcdvd_device *pd, request_queue_t *q,
+				struct request *rq)
+{
+	struct request *nxt;
+
+	while (!list_empty(&q->queue_head)) {
+		if (rq->nr_sectors == pd->settings.size)
+			break;
+
+		nxt = blkdev_entry_next_request(&q->queue_head);
+
+		if (ZONE(rq->sector, pd) != ZONE(nxt->sector, pd))
+			break;
+		else if (rq->sector + rq->nr_sectors > nxt->sector)
+			break;
+
+		rq->nr_sectors = rq->hard_nr_sectors += nxt->nr_sectors;
+		rq->bhtail->b_reqnext = nxt->bh;
+		rq->bhtail = nxt->bhtail;
+		list_del(&nxt->queue);
+		blkdev_release_request(nxt);
+	}
+}
+
+/*
+ * our request function.
+ *
+ * - reads are just tossed directly to the device, we don't care.
+ * - writes, regardless of size, are added as the current pd rq and
+ *   kcdrwd is woken up to handle it. kcdrwd will also make sure to
+ *   reinvoke this request handler, once the given request has been
+ *   processed.
+ *
+ * Locks: io_request_lock held
+ *
+ * Notes: all writers have their own queue, so all requests are for the
+ *	  same device
+ */
+static void pkt_request(request_queue_t *q)
+{
+	struct pktcdvd_device *pd = (struct pktcdvd_device *) q->queuedata;
+	unsigned long flags;
+
+	if (list_empty(&q->queue_head))
+		return;
+
+	while (!list_empty(&q->queue_head)) {
+		struct request *rq = blkdev_entry_next_request(&q->queue_head);
+
+		VPRINTK("pkt_request: cmd=%d, rq=%p, rq->sector=%ld, rq->nr_sectors=%ld\n", rq->cmd, rq, rq->sector, rq->nr_sectors);
+
+		blkdev_dequeue_request(rq);
+
+		rq->rq_dev = pd->dev;
+
+		if (rq->cmd == READ)
+			BUG();
+
+		if (test_bit(PACKET_RECOVERY, &pd->flags))
+			break;
+
+		/*
+		 * paranoia, shouldn't trigger...
+		 */
+		if (!pkt_one_zone(pd, rq)) {
+			printk("rq->cmd=%d, rq->sector=%ld, rq->nr_sectors=%ld\n",
+				rq->cmd, rq->sector, rq->nr_sectors);
+			BUG();
+		}
+
+		pkt_attempt_remerge(pd, q, rq);
+
+		spin_lock_irqsave(&pd->lock, flags);
+
+		/*
+		 * already gathering data for another read. the
+		 * rfn will be reinvoked once that is done
+		 */
+		if (test_and_set_bit(PACKET_RQ, &pd->flags)) {
+			list_add(&rq->queue, &q->queue_head);
+			spin_unlock_irqrestore(&pd->lock, flags);
+			break;
+		}
+
+		if (pd->rq)
+			BUG();
+
+		pd->rq = rq;
+		spin_unlock_irqrestore(&pd->lock, flags);
+		break;
+	}
+	VPRINTK("wake up wait queue\n");
+	wake_up(&pd->wqueue);
+}
+
+static void pkt_print_settings(struct pktcdvd_device *pd)
+{
+	printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
+	printk("%u blocks, ", pd->settings.size >> 2);
+	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
+}
+
+/*
+ * A generic sense dump / resolve mechanism should be implemented across
+ * all ATAPI + SCSI devices.
+ */
+static void pkt_dump_sense(struct request_sense *sense)
+{
+	char *info[9] = { "No sense", "Recovered error", "Not ready",
+			  "Medium error", "Hardware error", "Illegal request",
+			  "Unit attention", "Data protect", "Blank check" };
+
+	if (sense == NULL)
+		return;
+
+	if (sense->sense_key > 8) {
+		printk("pktcdvd: sense invalid\n");
+		return;
+	}
+
+	printk("pktcdvd: sense category %s ", info[sense->sense_key]);
+	printk("asc(%02x), ascq(%02x)\n", sense->asc, sense->ascq);
+}
+
+/*
+ * write mode select package based on pd->settings
+ */
+static int pkt_set_write_settings(struct pktcdvd_device *pd)
+{
+	struct cdrom_device_info *cdi = pd->cdi;
+	struct cdrom_generic_command cgc;
+	write_param_page *wp;
+	char buffer[128];
+	int ret, size;
+
+	memset(buffer, 0, sizeof(buffer));
+	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
+	if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
+		return ret;
+
+	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
+	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
+	if (size > sizeof(buffer))
+		size = sizeof(buffer);
+
+	/*
+	 * now get it all
+	 */
+	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
+	if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
+		return ret;
+
+	/*
+	 * write page is offset header + block descriptor length
+	 */
+	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
+
+	wp->fp = pd->settings.fp;
+	wp->track_mode = pd->settings.track_mode;
+	wp->write_type = pd->settings.write_type;
+	wp->data_block_type = pd->settings.block_mode;
+
+	wp->multi_session = 0;
+
+#ifdef PACKET_USE_LS
+	wp->link_size = 7;
+	wp->ls_v = 1;
+#endif
+
+	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
+		wp->session_format = 0;
+		wp->subhdr2 = 0x20;
+	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
+		wp->session_format = 0x20;
+		wp->subhdr2 = 8;
+#if 0
+		wp->mcn[0] = 0x80;
+		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
+#endif
+	} else {
+		/*
+		 * paranoia
+		 */
+		printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
+		return 1;
+	}
+	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
+
+	cgc.buflen = cgc.cmd[8] = size;
+	if ((ret = cdrom_mode_select(cdi, &cgc))) {
+		pkt_dump_sense(cgc.sense);
+		return ret;
+	}
+
+	pkt_print_settings(pd);
+	return 0;
+}
+
+/*
+ * 0 -- we can write to this track, 1 -- we can't
+ */
+static int pkt_good_track(track_information *ti)
+{
+	/*
+	 * only good for CD-RW at the moment, not DVD-RW
+	 */
+
+	/*
+	 * FIXME: only for FP
+	 */
+	if (ti->fp == 0)
+		return 0;
+
+	/*
+	 * "good" settings as per Mt Fuji.
+	 */
+	if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
+		return 0;
+
+	if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
+		return 0;
+
+	if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
+		return 0;
+
+	printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+	return 1;
+}
+
+/*
+ * 0 -- we can write to this disc, 1 -- we can't
+ */
+static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
+{
+	/*
+	 * for disc type 0xff we should probably reserve a new track.
+	 * but I'm not sure, should we leave this to user apps? probably.
+	 */
+	if (di->disc_type == 0xff) {
+		printk("pktcdvd: Unknown disc. No track?\n");
+		return 1;
+	}
+
+	if (di->disc_type != 0x20 && di->disc_type != 0) {
+		printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
+		return 1;
+	}
+
+	if (di->erasable == 0) {
+		printk("pktcdvd: Disc not erasable\n");
+		return 1;
+	}
+
+	if (pd->track_status == PACKET_SESSION_RESERVED) {
+		printk("pktcdvd: Can't write to last track (reserved)\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int pkt_probe_settings(struct pktcdvd_device *pd)
+{
+	disc_information di;
+	track_information ti;
+	int ret, track;
+
+	memset(&di, 0, sizeof(disc_information));
+	memset(&ti, 0, sizeof(track_information));
+
+	if ((ret = cdrom_get_disc_info(pd->dev, &di))) {
+		printk("failed get_disc\n");
+		return ret;
+	}
+
+	pd->disc_status = di.disc_status;
+	pd->track_status = di.border_status;
+
+	if (pkt_good_disc(pd, &di))
+		return -ENXIO;
+
+	printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
+	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
+
+	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
+	if ((ret = cdrom_get_track_info(pd->dev, track, 1, &ti))) {
+		printk("pktcdvd: failed get_track\n");
+		return ret;
+	}
+
+	if (pkt_good_track(&ti)) {
+		printk("pktcdvd: can't write to this track\n");
+		return -ENXIO;
+	}
+
+	/*
+	 * we keep packet size in 512 byte units, makes it easier to
+	 * deal with request calculations.
+	 */
+	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
+	if (pd->settings.size == 0) {
+		printk("pktcdvd: detected zero packet size!\n");
+		pd->settings.size = 128;
+	}
+	pd->settings.fp = ti.fp;
+	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
+
+	if (ti.nwa_v) {
+		pd->nwa = be32_to_cpu(ti.next_writable);
+		set_bit(PACKET_NWA_VALID, &pd->flags);
+	}
+
+	/*
+	 * in theory we could use lra on -RW media as well and just zero
+	 * blocks that haven't been written yet, but in practice that
+	 * is just a no-go. we'll use that for -R, naturally.
+	 */
+	if (ti.lra_v) {
+		pd->lra = be32_to_cpu(ti.last_rec_address);
+		set_bit(PACKET_LRA_VALID, &pd->flags);
+	} else {
+		pd->lra = 0xffffffff;
+		set_bit(PACKET_LRA_VALID, &pd->flags);
+	}
+
+	/*
+	 * fine for now
+	 */
+	pd->settings.link_loss = 7;
+	pd->settings.write_type = 0;	/* packet */
+	pd->settings.track_mode = ti.track_mode;
+
+	/*
+	 * mode1 or mode2 disc
+	 */
+	switch (ti.data_mode) {
+		case PACKET_MODE1:
+			pd->settings.block_mode = PACKET_BLOCK_MODE1;
+			break;
+		case PACKET_MODE2:
+			pd->settings.block_mode = PACKET_BLOCK_MODE2;
+			break;
+		default:
+			printk("pktcdvd: unknown data mode\n");
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * enable/disable write caching on drive
+ */
+static int pkt_write_caching(struct pktcdvd_device *pd, int set)
+{
+	struct cdrom_generic_command cgc;
+	unsigned char buf[64];
+	int ret;
+
+	memset(buf, 0, sizeof(buf));
+	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+	cgc.buflen = pd->mode_offset + 12;
+
+	/*
+	 * caching mode page might not be there, so quiet this command
+	 */
+	cgc.quiet = 1;
+
+	if ((ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_WCACHING_PAGE, 0)))
+		return ret;
+
+	buf[pd->mode_offset + 10] |= (!!set << 2);
+
+	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
+	ret = cdrom_mode_select(pd->cdi, &cgc);
+	if (ret)
+		printk("pktcdvd: write caching control failed\n");
+	else if (!ret && set)
+		printk("pktcdvd: enabled write caching on %s\n", pd->name);
+	return ret;
+}
+
+/*
+ * flush the drive cache to media
+ */
+static int pkt_flush_cache(struct pktcdvd_device *pd)
+{
+	struct cdrom_generic_command cgc;
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+	cgc.quiet = 1;
+	cgc.timeout = 60*HZ;
+
+	/*
+	 * the IMMED bit -- we default to not setting it, although that
+	 * would allow a much faster close, this is safer
+	 */
+#if 0
+	cgc.cmd[1] = 1 << 1;
+#endif
+	return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
+}
+
+/*
+ * Returns drive current write speed
+ */
+static int pkt_get_speed(struct pktcdvd_device *pd)
+{
+	struct cdrom_generic_command cgc;
+	unsigned char buf[64];
+	int ret, offset;
+
+	memset(buf, 0, sizeof(buf));
+	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
+
+	ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+	if (ret) {
+		cgc.buflen = pd->mode_offset + buf[pd->mode_offset + 9] + 2 +
+			     sizeof(struct mode_page_header);
+		ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+		if (ret)
+			return ret;
+	}
+
+	offset = pd->mode_offset + 26;
+	pd->speed = ((buf[offset] << 8) | buf[offset + 1]) / 0xb0;
+	return 0;
+}
+
+/*
+ * speed is given as the normal factor, e.g. 4 for 4x
+ */
+static int pkt_set_speed(struct pktcdvd_device *pd, unsigned speed)
+{
+	struct cdrom_generic_command cgc;
+	unsigned read_speed;
+
+	/*
+	 * we set read and write time so that read spindle speed is one and
+	 * a half times as fast as write. although a drive can typically read much
+	 * faster than write, this minimizes the spin up/down when we write
+	 * and gather data. maybe 1/1 factor is faster, needs a bit of testing.
+	 */
+	speed = speed * 0xb0;
+	read_speed = (speed * 3) >> 1;
+	read_speed = min_t(unsigned, read_speed, 0xffff);
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.cmd[0] = 0xbb;
+	cgc.cmd[2] = (read_speed >> 8) & 0xff;
+	cgc.cmd[3] = read_speed & 0xff;
+	cgc.cmd[4] = (speed >> 8) & 0xff;
+	cgc.cmd[5] = speed & 0xff;
+
+	return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
+}
+
+/*
+ * Give me full power, Captain
+ */
+static int pkt_adjust_speed(struct pktcdvd_device *pd, int speed)
+{
+	disc_information dummy;
+	int ret;
+
+	/*
+	 * FIXME: do proper unified cap page, also, this isn't proper
+	 * Mt Fuji, but I think we can safely assume all drives support
+	 * it. A hell of a lot more than support the GET_PERFORMANCE
+	 * command (besides, we also use the old set speed command,
+	 * not the streaming feature).
+	 */
+	if ((ret = pkt_set_speed(pd, speed)))
+		return ret;
+
+	/*
+	 * just do something with the disc -- next read will contain the
+	 * maximum speed with this media
+	 */
+	if ((ret = cdrom_get_disc_info(pd->dev, &dummy)))
+		return ret;
+
+	if ((ret = pkt_get_speed(pd))) {
+		printk("pktcdvd: failed get speed\n");
+		return ret;
+	}
+
+	DPRINTK("pktcdvd: speed (R/W) %u/%u\n", (pd->speed * 3) / 2, pd->speed);
+	return 0;
+}
+
+static int pkt_open_write(struct pktcdvd_device *pd)
+{
+	int ret;
+
+	if ((ret = pkt_probe_settings(pd))) {
+		DPRINTK("pktcdvd: %s failed probe\n", pd->name);
+		return -EIO;
+	}
+
+	if ((ret = pkt_set_write_settings(pd))) {
+		DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
+		return -EIO;
+	}
+
+	(void) pkt_write_caching(pd, USE_WCACHING);
+
+	if ((ret = pkt_adjust_speed(pd, 16))) {
+		DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * called at open time.
+ */
+static int pkt_open_dev(struct pktcdvd_device *pd, int write)
+{
+	int ret;
+	long lba;
+
+	if (!pd->dev)
+		return -ENXIO;
+
+	pd->bdev = bdget(kdev_t_to_nr(pd->dev));
+	if (!pd->bdev) {
+		printk("pktcdvd: can't find cdrom block device\n");
+		return -ENXIO;
+	}
+
+	if ((ret = blkdev_get(pd->bdev, FMODE_READ, 0, BDEV_FILE))) {
+		pd->bdev = NULL;
+		return ret;
+	}
+
+	if ((ret = cdrom_get_last_written(pd->dev, &lba))) {
+		printk("pktcdvd: cdrom_get_last_written failed\n");
+		return ret;
+	}
+
+	pkt_sizes[MINOR(pd->pkt_dev)] = lba << 1;
+
+	if (write) {
+		if ((ret = pkt_open_write(pd)))
+			return ret;
+		pkt_mark_readonly(pd, 0);
+	} else {
+		(void) pkt_adjust_speed(pd, 0xff);
+		pkt_mark_readonly(pd, 1);
+	}
+
+	if (write)
+		printk("pktcdvd: %lukB available on disc\n", lba << 1);
+
+	return 0;
+}
+
+/*
+ * called when the device is closed. makes sure that the device flushes
+ * the internal cache before we close.
+ */
+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
+{
+	atomic_dec(&pd->refcnt);
+	if (atomic_read(&pd->refcnt) > 0)
+		return;
+
+	fsync_dev(pd->pkt_dev);
+
+	if (flush && pkt_flush_cache(pd))
+		DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
+
+	if (pd->bdev) {
+		blkdev_put(pd->bdev, BDEV_FILE);
+		pd->bdev = NULL;
+	}
+}
+
+static int pkt_open(struct inode *inode, struct file *file)
+{
+	struct pktcdvd_device *pd = NULL;
+	int ret;
+
+	VPRINTK("pktcdvd: entering open\n");
+
+	if (MINOR(inode->i_rdev) >= MAX_WRITERS) {
+		printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * either device is not configured, or pktsetup is old and doesn't
+	 * use O_CREAT to create device
+	 */
+	pd = &pkt_devs[MINOR(inode->i_rdev)];
+	if (!pd->dev && !(file->f_flags & O_CREAT)) {
+		VPRINTK("pktcdvd: not configured and O_CREAT not set\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	atomic_inc(&pd->refcnt);
+	if (atomic_read(&pd->refcnt) > 1) {
+		if (file->f_mode & FMODE_WRITE) {
+			VPRINTK("pktcdvd: busy open for write\n");
+			ret = -EBUSY;
+			goto out_dec;
+		}
+
+		/*
+		 * Not first open, everything is already set up
+		 */
+		return 0;
+	}
+
+	if (((file->f_flags & O_ACCMODE) != O_RDONLY) || !(file->f_flags & O_CREAT)) {
+		if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
+			ret = -EIO;
+			goto out_dec;
+		}
+	}
+
+	/*
+	 * needed here as well, since ext2 (among others) may change
+	 * the blocksize at mount time
+	 */
+	set_blocksize(pd->pkt_dev, CD_FRAMESIZE);
+	return 0;
+
+out_dec:
+	atomic_dec(&pd->refcnt);
+	if (atomic_read(&pd->refcnt) == 0) {
+		if (pd->bdev) {
+			blkdev_put(pd->bdev, BDEV_FILE);
+			pd->bdev = NULL;
+		}
+	}
+out:
+	VPRINTK("pktcdvd: failed open (%d)\n", ret);
+	return ret;
+}
+
+static int pkt_close(struct inode *inode, struct file *file)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
+	int ret = 0;
+
+	if (pd->dev) {
+		int flush = !test_bit(PACKET_READONLY, &pd->flags);
+		pkt_release_dev(pd, flush);
+	}
+
+	return ret;
+}
+
+/*
+ * pktcdvd i/o elevator parts
+ */
+static inline int pkt_bh_rq_ordered(struct buffer_head *bh, struct request *rq,
+				    struct list_head *head)
+{
+	struct list_head *next;
+	struct request *next_rq;
+
+	next = rq->queue.next;
+	if (next == head)
+		return 0;
+
+	next_rq = blkdev_entry_to_request(next);
+	if (next_rq->rq_dev != rq->rq_dev)
+		return bh->b_rsector > rq->sector;
+
+	if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
+		return 1;
+
+	if (next_rq->sector > rq->sector)
+		return 0;
+
+	if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
+		return 1;
+
+	return 0;
+}
+
+static int pkt_elevator_merge(request_queue_t *q, struct request **req,
+			      struct list_head *head,
+			      struct buffer_head *bh, int rw,
+			      int max_sectors)
+{
+	struct list_head *entry = &q->queue_head;
+	unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
+
+	if (bh->b_reqnext)
+		BUG();
+
+	VPRINTK("pkt_elevator_merge: rw=%d, ms=%d, bh=%lu, dev=%d\n", rw, max_sectors, bh->b_rsector, bh->b_rdev);
+
+	while ((entry = entry->prev) != head) {
+		struct request *__rq = blkdev_entry_to_request(entry);
+		if (__rq->waiting)
+			continue;
+		if (__rq->rq_dev != bh->b_rdev)
+			continue;
+		if (!*req && pkt_bh_rq_ordered(bh, __rq, &q->queue_head))
+			*req = __rq;
+		if (__rq->cmd != rw)
+			continue;
+		if (__rq->nr_sectors + count > max_sectors)
+			continue;
+		if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
+			ret = ELEVATOR_BACK_MERGE;
+			*req = __rq;
+			break;
+		} else if (__rq->sector - count == bh->b_rsector) {
+			ret = ELEVATOR_FRONT_MERGE;
+			*req = __rq;
+			break;
+		}
+#if 0 /* makes sense, chance of two matches probably slim */
+		else if (*req)
+			break;
+#endif
+	}
+	VPRINTK("*req=%p, ret=%d\n", *req, ret);
+
+	return ret;
+}
+
+static int pkt_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
+{
+	struct pktcdvd_device *pd;
+	struct buffer_head *new_bh;
+
+	if (MINOR(bh->b_rdev) >= MAX_WRITERS) {
+		printk("pktcdvd: %s out of range\n", kdevname(bh->b_rdev));
+		goto end_io;
+	}
+
+	pd = &pkt_devs[MINOR(bh->b_rdev)];
+	if (!pd->dev) {
+		printk("pktcdvd: request received for non-active pd\n");
+		goto end_io;
+	}
+
+	/*
+	 * quick remap a READ
+	 */
+	if (rw == READ || rw == READA) {
+		down(&pd->cache_sync_mutex);
+		pkt_flush_writes(pd);
+		bh->b_rdev = pd->dev;
+		generic_make_request(rw, bh);
+		up(&pd->cache_sync_mutex);
+		return 0;
+	}
+
+	if (!(rw & WRITE))
+		BUG();
+
+	if (test_bit(PACKET_READONLY, &pd->flags)) {
+		printk("pktcdvd: WRITE for ro device %s (%lu)\n",
+			pd->name, bh->b_rsector);
+		goto end_io;
+	}
+
+	VPRINTK("pkt_make_request: bh:%p block:%ld size:%d\n",
+		bh, bh->b_blocknr, bh->b_size);
+
+	if (bh->b_size != CD_FRAMESIZE) {
+		printk("pktcdvd: wrong bh size\n");
+		goto end_io;
+	}
+
+	/*
+	 * This is deadlock safe, since pkt_get_stacked_bh can only
+	 * fail if there are already buffers in flight for this
+	 * packet device. When the in-flight buffers finish, we
+	 * will be woken up and try again.
+	 */
+	new_bh = kmem_cache_alloc(bh_cachep, GFP_ATOMIC);
+	while (!new_bh) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		generic_unplug_device(q);
+
+		add_wait_queue(&pd_bh_wait, &wait);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+
+		new_bh = pkt_get_stacked_bh(pd);
+		if (!new_bh)
+			schedule();
+
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&pd_bh_wait, &wait);
+	}
+
+	new_bh->b_size = bh->b_size;
+	new_bh->b_list = PKT_BUF_LIST + 1;
+	new_bh->b_dev = bh->b_dev;
+	atomic_set(&new_bh->b_count, 1);
+	new_bh->b_rdev = bh->b_rdev;
+	new_bh->b_state = bh->b_state;
+	new_bh->b_page = bh->b_page;
+	new_bh->b_data = bh->b_data;
+	new_bh->b_private = bh;
+	new_bh->b_end_io = pkt_end_io_write_stacked;
+	new_bh->b_rsector = bh->b_rsector;
+
+	return pd->make_request_fn(q, rw, new_bh);
+
+end_io:
+	buffer_IO_error(bh);
+	return 0;
+}
+
+static void show_requests(request_queue_t *q)
+{
+	struct list_head *entry;
+
+	spin_lock_irq(&io_request_lock);
+
+	list_for_each(entry, &q->queue_head) {
+		struct request *rq = blkdev_entry_to_request(entry);
+		int zone = rq->sector & ~127;
+		int hole;
+
+		hole = 0;
+		if ((rq->sector + rq->nr_sectors - (rq->bhtail->b_size >> 9))
+		    != rq->bhtail->b_rsector)
+			hole = 1;
+
+		printk("rq: cmd %d, sector %lu (-> %lu), zone %u, hole %d, nr_sectors %lu\n", rq->cmd, rq->sector, rq->sector + rq->nr_sectors - 1, zone, hole, rq->nr_sectors);
+	}
+
+	spin_unlock_irq(&io_request_lock);
+}
+
+static void sysrq_handle_show_requests(int key, struct pt_regs *pt_regs,
+		struct kbd_struct *kbd, struct tty_struct *tty)
+{
+	/*
+	 * quick hack to show pending requests in /dev/pktcdvd0 queue
+	 */
+	queue_proc *qp = blk_dev[PACKET_MAJOR].queue;
+	if (qp) {
+		request_queue_t *q = qp(MKDEV(PACKET_MAJOR, 0));
+		if (q)
+			show_requests(q);
+	}
+}
+static struct sysrq_key_op sysrq_show_requests_op = {
+	handler:	sysrq_handle_show_requests,
+	help_msg:	"showreQuests",
+	action_msg:	"Show requests",
+};
+
+static void pkt_init_queue(struct pktcdvd_device *pd)
+{
+	request_queue_t *q = &pd->cdrw.r_queue;
+
+	blk_init_queue(q, pkt_request);
+	elevator_init(&q->elevator, ELEVATOR_PKTCDVD);
+	pd->make_request_fn = q->make_request_fn;
+	blk_queue_make_request(q, pkt_make_request);
+	blk_queue_headactive(q, 0);
+	q->front_merge_fn = pkt_front_merge_fn;
+	q->back_merge_fn = pkt_back_merge_fn;
+	q->merge_requests_fn = pkt_merge_requests_fn;
+	q->queuedata = pd;
+}
+
+static int pkt_proc_device(struct pktcdvd_device *pd, char *buf)
+{
+	char *b = buf, *msg;
+	struct list_head *foo;
+	int i;
+
+	b += sprintf(b, "\nWriter %s (%s):\n", pd->name, kdevname(pd->dev));
+
+	b += sprintf(b, "\nSettings:\n");
+	b += sprintf(b, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
+
+	if (pd->settings.write_type == 0)
+		msg = "Packet";
+	else
+		msg = "Unknown";
+	b += sprintf(b, "\twrite type:\t\t%s\n", msg);
+
+	b += sprintf(b, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
+	b += sprintf(b, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
+
+	b += sprintf(b, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
+
+	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
+		msg = "Mode 1";
+	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
+		msg = "Mode 2";
+	else
+		msg = "Unknown";
+	b += sprintf(b, "\tblock mode:\t\t%s\n", msg);
+
+	b += sprintf(b, "\nStatistics:\n");
+	b += sprintf(b, "\tbuffers started:\t%lu\n", pd->stats.bh_s);
+	b += sprintf(b, "\tbuffers ended:\t\t%lu\n", pd->stats.bh_e);
+	b += sprintf(b, "\tsectors written:\t%lu\n", pd->stats.secs_w);
+	b += sprintf(b, "\tsectors read:\t\t%lu\n", pd->stats.secs_r);
+	b += sprintf(b, "\tbuffer cache hits:\t%lu\n", pd->stats.bh_cache_hits);
+	b += sprintf(b, "\tpage cache hits:\t%lu\n", pd->stats.page_cache_hits);
+
+	b += sprintf(b, "\nMisc:\n");
+	b += sprintf(b, "\treference count:\t%d\n", atomic_read(&pd->refcnt));
+	b += sprintf(b, "\tflags:\t\t\t0x%lx\n", pd->flags);
+	b += sprintf(b, "\twrite speed:\t\t%ukB/s\n", pd->speed * 150);
+	b += sprintf(b, "\tstart offset:\t\t%lu\n", pd->offset);
+	b += sprintf(b, "\tmode page offset:\t%u\n", pd->mode_offset);
+
+	b += sprintf(b, "\nQueue state:\n");
+	b += sprintf(b, "\tfree buffers:\t\t%u\n", atomic_read(&pd->cdrw.free_bh));
+	b += sprintf(b, "\trequest active:\t\t%s\n", pd->rq ? "yes" : "no");
+	b += sprintf(b, "\twrite rq depth:\t\t%d\n", atomic_read(&pd->wrqcnt));
+
+	spin_lock_irq(&io_request_lock);
+	i = 0;
+	list_for_each(foo, &pd->cdrw.r_queue.queue_head)
+		i++;
+	spin_unlock_irq(&io_request_lock);
+	b += sprintf(b, "\tqueue requests:\t\t%u\n", i);
+
+	return b - buf;
+}
+
+static int pkt_read_proc(char *page, char **start, off_t off, int count,
+			 int *eof, void *data)
+{
+	struct pktcdvd_device *pd = data;
+	char *buf = page;
+	int len;
+
+	len = pkt_proc_device(pd, buf);
+	buf += len;
+
+	if (len <= off + count)
+		*eof = 1;
+
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+
+static int pkt_new_dev(struct pktcdvd_device *pd, kdev_t dev)
+{
+	struct cdrom_device_info *cdi;
+	request_queue_t *q;
+	int i;
+
+	for (i = 0; i < MAX_WRITERS; i++) {
+		if (pkt_devs[i].dev == dev) {
+			printk("pktcdvd: %s already setup\n", kdevname(dev));
+			return -EBUSY;
+		}
+	}
+
+	for (i = 0; i < MAX_WRITERS; i++)
+		if (pd == &pkt_devs[i])
+			break;
+	BUG_ON(i == MAX_WRITERS);
+
+	cdi = cdrom_find_device(dev);
+	if (cdi == NULL) {
+		printk("pktcdvd: %s is not a CD-ROM\n", kdevname(dev));
+		return -ENXIO;
+	}
+
+	MOD_INC_USE_COUNT;
+
+	memset(pd, 0, sizeof(struct pktcdvd_device));
+	atomic_set(&pd->cdrw.free_bh, 0);
+
+	spin_lock_init(&pd->lock);
+	if (pkt_grow_bhlist(pd, PACKET_MAX_SIZE) < PACKET_MAX_SIZE) {
+		MOD_DEC_USE_COUNT;
+		printk("pktcdvd: not enough memory for buffers\n");
+		return -ENOMEM;
+	}
+
+	pd->stacked_bhcnt = 0;
+	if (!pkt_grow_stacked_bhlist(pd)) {
+		MOD_DEC_USE_COUNT;
+		printk("pktcdvd: not enough memory for buffer heads\n");
+		return -ENOMEM;
+	}
+
+	set_blocksize(dev, CD_FRAMESIZE);
+	pd->cdi = cdi;
+	pd->dev = dev;
+	pd->bdev = NULL;
+	pd->pkt_dev = MKDEV(PACKET_MAJOR, i);
+	sprintf(pd->name, "pktcdvd%d", i);
+	atomic_set(&pd->refcnt, 0);
+	atomic_set(&pd->wrqcnt, 0);
+	init_MUTEX(&pd->cache_sync_mutex);
+	pd->unflushed_writes = 0;
+	init_waitqueue_head(&pd->wqueue);
+	init_completion(&pd->cdrw.thr_compl);
+
+	/*
+	 * store device merge functions (SCSI uses their own to build
+	 * scatter-gather tables)
+	 */
+	q = blk_get_queue(dev);
+	pkt_init_queue(pd);
+	pd->cdrw.front_merge_fn = q->front_merge_fn;
+	pd->cdrw.back_merge_fn = q->back_merge_fn;
+	pd->cdrw.merge_requests_fn = q->merge_requests_fn;
+	pd->cdrw.queuedata = q->queuedata;
+
+	pd->cdrw.pid = kernel_thread(kcdrwd, pd, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+	if (pd->cdrw.pid < 0) {
+		MOD_DEC_USE_COUNT;
+		printk("pktcdvd: can't start kernel thread\n");
+		blk_cleanup_queue(&pd->cdrw.r_queue);
+		pkt_shrink_stacked_bhlist(pd);
+		pkt_shrink_bhlist(pd, PACKET_MAX_SIZE);
+		memset(pd, 0, sizeof(*pd));
+		return -EBUSY;
+	}
+
+	create_proc_read_entry(pd->name, 0, pkt_proc, pkt_read_proc, pd);
+	DPRINTK("pktcdvd: writer %s sucessfully registered\n", cdi->name);
+	return 0;
+}
+
+/*
+ * arg contains file descriptor of CD-ROM device.
+ */
+static int pkt_setup_dev(struct pktcdvd_device *pd, unsigned int arg)
+{
+	struct inode *inode;
+	struct file *file;
+	int ret;
+
+	if ((file = fget(arg)) == NULL) {
+		printk("pktcdvd: bad file descriptor passed\n");
+		return -EBADF;
+	}
+
+	ret = -EINVAL;
+	if ((inode = file->f_dentry->d_inode) == NULL) {
+		printk("pktcdvd: huh? file descriptor contains no inode?\n");
+		goto out;
+	}
+	ret = -ENOTBLK;
+	if (!S_ISBLK(inode->i_mode)) {
+		printk("pktcdvd: device is not a block device (duh)\n");
+		goto out;
+	}
+	ret = -EROFS;
+	if (IS_RDONLY(inode)) {
+		printk("pktcdvd: Can't write to read-only dev\n");
+		goto out;
+	}
+	if ((ret = pkt_new_dev(pd, inode->i_rdev))) {
+		printk("pktcdvd: all booked up\n");
+		goto out;
+	}
+
+	atomic_inc(&pd->refcnt);
+
+out:
+	fput(file);
+	return ret;
+}
+
+static int pkt_remove_dev(struct pktcdvd_device *pd)
+{
+	int ret;
+
+	if (pd->cdrw.pid >= 0) {
+		ret = kill_proc(pd->cdrw.pid, SIGKILL, 1);
+		if (ret) {
+			printk("pkt_exit: can't kill kernel thread\n");
+			return ret;
+		}
+		wait_for_completion(&pd->cdrw.thr_compl);
+	}
+
+	/*
+	 * will also invalidate buffers for CD-ROM
+	 */
+	invalidate_device(pd->pkt_dev, 1);
+
+	pkt_shrink_stacked_bhlist(pd);
+	if ((ret = pkt_shrink_bhlist(pd, PACKET_MAX_SIZE)) != PACKET_MAX_SIZE)
+		printk("pktcdvd: leaked %d buffers\n", PACKET_MAX_SIZE - ret);
+
+	blk_cleanup_queue(&pd->cdrw.r_queue);
+	remove_proc_entry(pd->name, pkt_proc);
+	DPRINTK("pktcdvd: writer %s unregistered\n", pd->cdi->name);
+	memset(pd, 0, sizeof(struct pktcdvd_device));
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+static int pkt_media_change(kdev_t dev)
+{
+	struct pktcdvd_device *pd = pkt_find_dev(dev);
+	if (!pd)
+		return 0;
+	return cdrom_media_changed(pd->dev);
+}
+
+static int pkt_ioctl(struct inode *inode, struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
+
+	VPRINTK("pkt_ioctl: cmd %d, dev %x\n", cmd, inode->i_rdev);
+
+	if ((cmd != PACKET_SETUP_DEV) && !pd->dev) {
+		DPRINTK("pktcdvd: dev not setup\n");
+		return -ENXIO;
+	}
+
+	switch (cmd) {
+	case PACKET_GET_STATS:
+		if (copy_to_user(&arg, &pd->stats, sizeof(struct packet_stats)))
+			return -EFAULT;
+		break;
+
+	case PACKET_SETUP_DEV:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (pd->dev) {
+			printk("pktcdvd: dev already setup\n");
+			return -EBUSY;
+		}
+		return pkt_setup_dev(pd, arg);
+
+	case PACKET_TEARDOWN_DEV:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (atomic_read(&pd->refcnt) != 1)
+			return -EBUSY;
+		return pkt_remove_dev(pd);
+
+	case BLKGETSIZE:
+		return put_user(blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 1, (unsigned long *)arg);
+
+	case BLKGETSIZE64:
+		return put_user((u64)blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 10,
+				(u64 *)arg);
+
+	case BLKROSET:
+		if (capable(CAP_SYS_ADMIN))
+			set_bit(PACKET_READONLY, &pd->flags);
+	case BLKROGET:
+	case BLKSSZGET:
+	case BLKRASET:
+	case BLKRAGET:
+	case BLKFLSBUF:
+		if (!pd->bdev)
+			return -ENXIO;
+		return blk_ioctl(inode->i_rdev, cmd, arg);
+
+	/*
+	 * forward selected CDROM ioctls to CD-ROM, for UDF
+	 */
+	case CDROMMULTISESSION:
+	case CDROMREADTOCENTRY:
+	case CDROM_LAST_WRITTEN:
+	case CDROM_SEND_PACKET:
+	case SCSI_IOCTL_SEND_COMMAND:
+		if (!pd->bdev)
+			return -ENXIO;
+		return ioctl_by_bdev(pd->bdev, cmd, arg);
+
+	default:
+		printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+static struct block_device_operations pktcdvd_ops = {
+	owner:			THIS_MODULE,
+	open:			pkt_open,
+	release:		pkt_close,
+	ioctl:			pkt_ioctl,
+	check_media_change:	pkt_media_change,
+};
+
+int pkt_init(void)
+{
+	int i;
+
+	devfs_register(NULL, "pktcdvd", DEVFS_FL_DEFAULT, PACKET_MAJOR, 0,
+		       S_IFBLK | S_IRUSR | S_IWUSR, &pktcdvd_ops, NULL);
+	if (devfs_register_blkdev(PACKET_MAJOR, "pktcdvd", &pktcdvd_ops)) {
+		printk("unable to register pktcdvd device\n");
+		return -EIO;
+	}
+
+	pkt_sizes = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
+	if (pkt_sizes == NULL)
+		goto err;
+
+	pkt_blksize = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
+	if (pkt_blksize == NULL)
+		goto err;
+
+	pkt_readahead = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
+	if (pkt_readahead == NULL)
+		goto err;
+
+	pkt_devs = kmalloc(MAX_WRITERS * sizeof(struct pktcdvd_device), GFP_KERNEL);
+	if (pkt_devs == NULL)
+		goto err;
+
+	memset(pkt_devs, 0, MAX_WRITERS * sizeof(struct pktcdvd_device));
+	memset(pkt_sizes, 0, MAX_WRITERS * sizeof(int));
+	memset(pkt_blksize, 0, MAX_WRITERS * sizeof(int));
+
+	for (i = 0; i < MAX_WRITERS; i++)
+		pkt_readahead[i] = vm_max_readahead;
+
+	blk_size[PACKET_MAJOR] = pkt_sizes;
+	blksize_size[PACKET_MAJOR] = pkt_blksize;
+	max_readahead[PACKET_MAJOR] = pkt_readahead;
+	read_ahead[PACKET_MAJOR] = 128;
+	set_blocksize(MKDEV(PACKET_MAJOR, 0), CD_FRAMESIZE);
+
+	blk_dev[PACKET_MAJOR].queue = pkt_get_queue;
+
+	pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
+
+	register_sysrq_key('q', &sysrq_show_requests_op);
+
+	DPRINTK("pktcdvd: %s\n", VERSION_CODE);
+	return 0;
+
+err:
+	printk("pktcdvd: out of memory\n");
+	devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
+		 	 DEVFS_SPECIAL_BLK, 0));
+	devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
+	kfree(pkt_devs);
+	kfree(pkt_sizes);
+	kfree(pkt_blksize);
+	kfree(pkt_readahead);
+	return -ENOMEM;
+}
+
+void pkt_exit(void)
+{
+	unregister_sysrq_key('q', &sysrq_show_requests_op);
+
+	devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
+		 	 DEVFS_SPECIAL_BLK, 0));
+	devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
+	blk_dev[PACKET_MAJOR].queue = NULL;
+
+	remove_proc_entry("pktcdvd", proc_root_driver);
+	kfree(pkt_sizes);
+	kfree(pkt_blksize);
+	kfree(pkt_devs);
+	kfree(pkt_readahead);
+}
+
+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
+MODULE_LICENSE("GPL");
+
+module_init(pkt_init);
+module_exit(pkt_exit);
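
For reference, here is a minimal user-space sketch of the ioctl interface implemented by pkt_ioctl()/pkt_setup_dev() above; it is not part of the patch. PACKET_SETUP_DEV binds a CD writer to a packet device by passing the writer's file descriptor as the ioctl argument, and PACKET_TEARDOWN_DEV unbinds it again. The /dev/hdc path, the argument handling and the error messages are illustrative only; the ioctl constants are assumed to come from <linux/pktcdvd.h>, and both calls require CAP_SYS_ADMIN.

/*
 * Hypothetical helper (illustrative only): bind a CD writer to the
 * first packet device registered by this driver, then tear it down.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pktcdvd.h>

int main(int argc, char **argv)
{
	const char *cdrom = (argc > 1) ? argv[1] : "/dev/hdc";	/* example path */
	int cd_fd, pkt_fd;

	cd_fd = open(cdrom, O_RDONLY | O_NONBLOCK);
	if (cd_fd < 0) {
		perror("open CD writer");
		return 1;
	}

	pkt_fd = open("/dev/pktcdvd0", O_RDONLY);
	if (pkt_fd < 0) {
		perror("open /dev/pktcdvd0");
		return 1;
	}

	/* PACKET_SETUP_DEV takes the CD-ROM file descriptor as its argument */
	if (ioctl(pkt_fd, PACKET_SETUP_DEV, cd_fd) < 0) {
		perror("PACKET_SETUP_DEV");
		return 1;
	}

	/* ... mount /dev/pktcdvd0 (e.g. as UDF) and write through it ... */

	/* unbind again; fails with EBUSY while the device is still in use */
	if (ioctl(pkt_fd, PACKET_TEARDOWN_DEV, 0) < 0)
		perror("PACKET_TEARDOWN_DEV");

	close(pkt_fd);
	close(cd_fd);
	return 0;
}

Between setup and teardown the packet device behaves like an ordinary block device; as the switch in pkt_ioctl() shows, the CDROM ioctls needed by UDF are forwarded to the underlying drive.
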
diff -u -r -N ../../linus/2.4/linux/drivers/cdrom/Makefile linux/drivers/cdrom/Makefile
--- ../../linus/2.4/linux/drivers/cdrom/Makefile	Tue Aug  6 21:14:34 2002
+++ linux/drivers/cdrom/Makefile	Tue Aug  6 21:22:09 2002
@@ -27,6 +27,7 @@
 obj-$(CONFIG_BLK_DEV_IDECD)	+=              cdrom.o
 obj-$(CONFIG_BLK_DEV_SR)	+=              cdrom.o
 obj-$(CONFIG_PARIDE_PCD)	+=		cdrom.o
+obj-$(CONFIG_CDROM_PKTCDVD)	+=		cdrom.o
 
 obj-$(CONFIG_AZTCD)		+= aztcd.o
 obj-$(CONFIG_CDU31A)		+= cdu31a.o     cdrom.o
diff -u -r -N ../../linus/2.4/linux/drivers/ide/ide-cd.c linux/drivers/ide/ide-cd.c
--- ../../linus/2.4/linux/drivers/ide/ide-cd.c	Tue Aug  6 21:14:39 2002
+++ linux/drivers/ide/ide-cd.c	Tue Aug  6 21:22:36 2002
@@ -292,9 +292,11 @@
  *			  correctly reporting tray status -- from
  *			  Michael D Johnson <johnsom@orst.edu>
  *
+ * 4.99			- Added write support for packet writing.
+ *
  *************************************************************************/
  
-#define IDECD_VERSION "4.59"
+#define IDECD_VERSION "4.99"
 
 #include <linux/config.h>
 #include <linux/module.h>
@@ -526,7 +528,7 @@
 
 	memset(pc, 0, sizeof(struct packet_command));
 	pc->c[0] = GPCMD_REQUEST_SENSE;
-	pc->c[4] = pc->buflen = 18;
+	pc->c[4] = pc->buflen = 14;
 	pc->buffer = (char *) sense;
 	pc->sense = (struct request_sense *) failed_command;
 
@@ -640,7 +642,7 @@
 			cdrom_saw_media_change (drive);
 
 			/* Fail the request. */
-			printk ("%s: tray open\n", drive->name);
+			/* printk ("%s: tray open\n", drive->name); */
 			cdrom_end_request (0, drive);
 		} else if (sense_key == UNIT_ATTENTION) {
 			/* Media change. */
@@ -1200,6 +1202,8 @@
 	 * partitions not really working, but better check anyway...
 	 */
 	if (rq->cmd == nxt->cmd && rq->rq_dev == nxt->rq_dev) {
+		if (rq->cmd == WRITE)
+			printk("merged write\n");
 		rq->nr_sectors += nxt->nr_sectors;
 		rq->hard_nr_sectors += nxt->nr_sectors;
 		rq->bhtail->b_reqnext = nxt->bh;
@@ -2497,6 +2501,12 @@
 static
 void ide_cdrom_release_real (struct cdrom_device_info *cdi)
 {
+	struct cdrom_generic_command cgc;
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+	cgc.quiet = 1;
+	(void) ide_cdrom_packet(cdi, &cgc);
 }
 
 
@@ -2685,15 +2695,10 @@
 		printk(" %dX", CDROM_CONFIG_FLAGS(drive)->max_speed);
 	printk(" %s", CDROM_CONFIG_FLAGS(drive)->dvd ? "DVD-ROM" : "CD-ROM");
 
-	if (CDROM_CONFIG_FLAGS (drive)->dvd_r|CDROM_CONFIG_FLAGS (drive)->dvd_ram)
-        	printk (" DVD%s%s", 
-        	(CDROM_CONFIG_FLAGS (drive)->dvd_r)? "-R" : "", 
-        	(CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "-RAM" : "");
-
-        if (CDROM_CONFIG_FLAGS (drive)->cd_r|CDROM_CONFIG_FLAGS (drive)->cd_rw) 
-        	printk (" CD%s%s", 
-        	(CDROM_CONFIG_FLAGS (drive)->cd_r)? "-R" : "", 
-        	(CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
+	if (CDROM_CONFIG_FLAGS(drive)->dvd_r || CDROM_CONFIG_FLAGS(drive)->dvd_ram)
+		printk (" DVD-R%s", (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "AM" : "");
+	if (CDROM_CONFIG_FLAGS(drive)->cd_r ||CDROM_CONFIG_FLAGS(drive)->cd_rw)
+		printk (" CD-R%s", (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
 
         if (CDROM_CONFIG_FLAGS (drive)->is_changer) 
         	printk (" changer w/%d slots", nslots);
@@ -2716,7 +2721,7 @@
 	int major = HWIF(drive)->major;
 	int minor = drive->select.b.unit << PARTN_BITS;
 
-	ide_add_setting(drive,	"breada_readahead",	SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 2, &read_ahead[major], NULL);
+	ide_add_setting(drive,	"breada_readahead",	SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 1024, &read_ahead[major], NULL);
 	ide_add_setting(drive,	"file_readahead",	SETTING_RW, BLKFRAGET, BLKFRASET, TYPE_INTA, 0, INT_MAX, 1, 1024, &max_readahead[major][minor],	NULL);
 	ide_add_setting(drive,	"max_kb_per_request",	SETTING_RW, BLKSECTGET, BLKSECTSET, TYPE_INTA, 1, 255, 1, 2, &max_sectors[major][minor], NULL);
 	ide_add_setting(drive,	"dsc_overlap",		SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1,	1, &drive->dsc_overlap, NULL);
@@ -2733,7 +2738,7 @@
 	/*
 	 * default to read-only always and fix latter at the bottom
 	 */
-	set_device_ro(MKDEV(HWIF(drive)->major, minor), 1);
+	set_device_ro(MKDEV(HWIF(drive)->major, minor), 0);
 	set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE);
 
 	drive->special.all	= 0;
diff -u -r -N ../../linus/2.4/linux/drivers/scsi/Config.in linux/drivers/scsi/Config.in
--- ../../linus/2.4/linux/drivers/scsi/Config.in	Tue Aug  6 21:15:02 2002
+++ linux/drivers/scsi/Config.in	Tue Aug  6 21:23:16 2002
@@ -20,10 +20,6 @@
 
 comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
 
-#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
-   bool '  Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES
-#fi
-
 bool '  Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
   
 bool '  Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
diff -u -r -N ../../linus/2.4/linux/drivers/scsi/scsi_merge.c linux/drivers/scsi/scsi_merge.c
--- ../../linus/2.4/linux/drivers/scsi/scsi_merge.c	Tue Aug  6 21:15:21 2002
+++ linux/drivers/scsi/scsi_merge.c	Tue Aug  6 21:23:18 2002
@@ -71,11 +71,6 @@
  */
 #define DMA_SEGMENT_SIZE_LIMITED
 
-#ifdef CONFIG_SCSI_DEBUG_QUEUES
-/*
- * Enable a bunch of additional consistency checking.   Turn this off
- * if you are benchmarking.
- */
 static int dump_stats(struct request *req,
 		      int use_clustering,
 		      int dma_host,
@@ -100,22 +95,6 @@
 	panic("Ththththaats all folks.  Too dangerous to continue.\n");
 }
 
-
-/*
- * Simple sanity check that we will use for the first go around
- * in order to ensure that we are doing the counting correctly.
- * This can be removed for optimization.
- */
-#define SANITY_CHECK(req, _CLUSTER, _DMA)				\
-    if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA, NULL) )	\
-    {									\
-	printk("Incorrect segment count at 0x%p", current_text_addr());	\
-	dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA, NULL)); \
-    }
-#else
-#define SANITY_CHECK(req, _CLUSTER, _DMA)
-#endif
-
 static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
 {
 	int jj;
@@ -532,7 +511,6 @@
 		     int max_segments)					\
 {									\
     int ret;								\
-    SANITY_CHECK(req, _CLUSTER, _DMA);					\
     ret =  __scsi_ ## _BACK_FRONT ## _merge_fn(q,			\
 					       req,			\
 					       bh,			\
@@ -742,7 +720,6 @@
 		     int max_segments)			\
 {							\
     int ret;						\
-    SANITY_CHECK(req, _CLUSTER, _DMA);			\
     ret =  __scsi_merge_requests_fn(q, req, next, max_segments, _CLUSTER, _DMA); \
     return ret;						\
 }
@@ -829,11 +806,7 @@
 	/*
 	 * First we need to know how many scatter gather segments are needed.
 	 */
-	if (!sg_count_valid) {
-		count = __count_segments(req, use_clustering, dma_host, NULL);
-	} else {
-		count = req->nr_segments;
-	}
+	count = __count_segments(req, use_clustering, dma_host, NULL);
 
 	/*
 	 * If the dma pool is nearly empty, then queue a minimal request
@@ -949,9 +922,7 @@
 	 */
 	if (count != SCpnt->use_sg) {
 		printk("Incorrect number of segments after building list\n");
-#ifdef CONFIG_SCSI_DEBUG_QUEUES
 		dump_stats(req, use_clustering, dma_host, count);
-#endif
 	}
 	if (!dma_host) {
 		return 1;
diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr.c linux/drivers/scsi/sr.c
--- ../../linus/2.4/linux/drivers/scsi/sr.c	Tue Aug  6 21:15:22 2002
+++ linux/drivers/scsi/sr.c	Tue Aug  6 21:23:18 2002
@@ -28,12 +28,16 @@
  *       Modified by Jens Axboe <axboe@suse.de> - support DVD-RAM
  *	 transparently and loose the GHOST hack
  *
+ *	 Modified by Jens Axboe <axboe@suse.de> - support packet writing
+ *	 through generic packet layer.
+ *
  *	 Modified by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *	 check resource allocation in sr_init and some cleanups
  *
  */
 
 #include <linux/module.h>
+#include <linux/config.h>
 
 #include <linux/fs.h>
 #include <linux/kernel.h>
@@ -716,7 +720,7 @@
 	cmd[2] = 0x2a;
 	cmd[4] = 128;
 	cmd[3] = cmd[5] = 0;
-	rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL);
+	rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL, SR_TIMEOUT);
 
 	if (rc) {
 		/* failed, drive doesn't have capabilities mode page */
@@ -748,16 +752,13 @@
 	if ((buffer[n + 2] & 0x8) == 0)
 		/* not a DVD drive */
 		scsi_CDs[i].cdi.mask |= CDC_DVD;
-	if ((buffer[n + 3] & 0x20) == 0) {
+	if ((buffer[n + 3] & 0x20) == 0)
 		/* can't write DVD-RAM media */
 		scsi_CDs[i].cdi.mask |= CDC_DVD_RAM;
-	} else {
-		scsi_CDs[i].device->writeable = 1;
-	}
 	if ((buffer[n + 3] & 0x10) == 0)
 		/* can't write DVD-R media */
 		scsi_CDs[i].cdi.mask |= CDC_DVD_R;
-	if ((buffer[n + 3] & 0x2) == 0)
+	if ((buffer[n + 3] & 0x02) == 0)
 		/* can't write CD-RW media */
 		scsi_CDs[i].cdi.mask |= CDC_CD_RW;
 	if ((buffer[n + 3] & 0x1) == 0)
@@ -777,6 +778,10 @@
 	/*else    I don't think it can close its tray
 	   scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
 
+	if (~scsi_CDs[i].cdi.mask & (CDC_DVD_RAM | CDC_CD_RW))
+		/* can write to DVD-RAM or CD-RW */
+		scsi_CDs[i].device->writeable = 1;
+
 	scsi_free(buffer, 512);
 }
 
@@ -792,7 +797,10 @@
 	if (device->scsi_level <= SCSI_2)
 		cgc->cmd[1] |= device->lun << 5;
 
-	cgc->stat = sr_do_ioctl(MINOR(cdi->dev), cgc->cmd, cgc->buffer, cgc->buflen, cgc->quiet, cgc->data_direction, cgc->sense);
+	if (cgc->timeout <= 0)
+		cgc->timeout = 5 * HZ;
+
+	cgc->stat = sr_do_ioctl(MINOR(cdi->dev), cgc->cmd, cgc->buffer, cgc->buflen, cgc->quiet, cgc->data_direction, cgc->sense, cgc->timeout);
 
 	return cgc->stat;
 }
diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr.h linux/drivers/scsi/sr.h
--- ../../linus/2.4/linux/drivers/scsi/sr.h	Tue Aug  6 21:15:22 2002
+++ linux/drivers/scsi/sr.h	Tue Aug  6 21:23:18 2002
@@ -36,7 +36,7 @@
 
 extern Scsi_CD *scsi_CDs;
 
-int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int, struct request_sense *);
+int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int, struct request_sense *, int);
 
 int sr_lock_door(struct cdrom_device_info *, int);
 int sr_tray_move(struct cdrom_device_info *, int);
diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr_ioctl.c linux/drivers/scsi/sr_ioctl.c
--- ../../linus/2.4/linux/drivers/scsi/sr_ioctl.c	Tue Aug  6 21:15:22 2002
+++ linux/drivers/scsi/sr_ioctl.c	Tue Aug  6 21:23:18 2002
@@ -68,14 +68,14 @@
 	sr_cmd[6] = trk1_te.cdte_addr.msf.minute;
 	sr_cmd[7] = trk1_te.cdte_addr.msf.second;
 	sr_cmd[8] = trk1_te.cdte_addr.msf.frame;
-	return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
+	return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
 }
 
 /* We do our own retries because we want to know what the specific
    error code is.  Normally the UNIT_ATTENTION code will automatically
    clear after one error */
 
-int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite, struct request_sense *sense)
+int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite, struct request_sense *sense, int timeout)
 {
 	Scsi_Request *SRpnt;
 	Scsi_Device *SDev;
@@ -109,7 +109,7 @@
 
 
 	scsi_wait_req(SRpnt, (void *) sr_cmd, (void *) buffer, buflength,
-		      IOCTL_TIMEOUT, IOCTL_RETRIES);
+		      timeout, IOCTL_RETRIES);
 
 	req = &SRpnt->sr_request;
 	if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
@@ -198,7 +198,7 @@
 	sr_cmd[1] = (scsi_CDs[minor].device->scsi_level <= SCSI_2) ?
 	            ((scsi_CDs[minor].device->lun) << 5) : 0;
 	sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
-	return sr_do_ioctl(minor, sr_cmd, NULL, 0, 1, SCSI_DATA_NONE, NULL);
+	return sr_do_ioctl(minor, sr_cmd, NULL, 0, 1, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
 }
 
 int sr_tray_move(struct cdrom_device_info *cdi, int pos)
@@ -211,7 +211,7 @@
 	sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
 	sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ;
 
-	return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
+	return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
 }
 
 int sr_lock_door(struct cdrom_device_info *cdi, int lock)
@@ -289,7 +289,7 @@
 	sr_cmd[8] = 24;
 	sr_cmd[9] = 0;
 
-	result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ, NULL);
+	result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
 
 	memcpy(mcn->medium_catalog_number, buffer + 9, 13);
 	mcn->medium_catalog_number[13] = 0;
@@ -319,7 +319,7 @@
 	sr_cmd[2] = (speed >> 8) & 0xff;	/* MSB for speed (in kbytes/sec) */
 	sr_cmd[3] = speed & 0xff;	/* LSB */
 
-	if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL))
+	if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT))
 		return -EIO;
 	return 0;
 }
@@ -349,7 +349,7 @@
 			sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
 			sr_cmd[8] = 12;		/* LSB of length */
 
-			result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
+			result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
 
 			tochdr->cdth_trk0 = buffer[2];
 			tochdr->cdth_trk1 = buffer[3];
@@ -369,7 +369,7 @@
 			sr_cmd[6] = tocentry->cdte_track;
 			sr_cmd[8] = 12;		/* LSB of length */
 
-			result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ, NULL);
+			result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
 
 			tocentry->cdte_ctrl = buffer[5] & 0xf;
 			tocentry->cdte_adr = buffer[5] >> 4;
@@ -396,7 +396,7 @@
 		sr_cmd[7] = ti->cdti_trk1;
 		sr_cmd[8] = ti->cdti_ind1;
 
-		result = sr_do_ioctl(target, sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL);
+		result = sr_do_ioctl(target, sr_cmd, NULL, 0, 0, SCSI_DATA_NONE, NULL, IOCTL_TIMEOUT);
 		if (result == -EDRIVE_CANT_DO_THIS)
 			result = sr_fake_playtrkind(cdi, ti);
 
@@ -462,7 +462,7 @@
 		cmd[9] = 0x10;
 		break;
 	}
-	return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL);
+	return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
 }
 
 /*
@@ -501,7 +501,7 @@
 	cmd[4] = (unsigned char) (lba >> 8) & 0xff;
 	cmd[5] = (unsigned char) lba & 0xff;
 	cmd[8] = 1;
-	rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL);
+	rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ, NULL, IOCTL_TIMEOUT);
 
 	return rc;
 }
diff -u -r -N ../../linus/2.4/linux/drivers/scsi/sr_vendor.c linux/drivers/scsi/sr_vendor.c
--- ../../linus/2.4/linux/drivers/scsi/sr_vendor.c	Tue Aug  6 21:15:22 2002
+++ linux/drivers/scsi/sr_vendor.c	Tue Aug  6 21:23:18 2002
@@ -60,6 +60,8 @@
 
 #define VENDOR_ID (scsi_CDs[minor].vendor)
 
+#define VENDOR_TIMEOUT	30*HZ
+
 void sr_vendor_init(int minor)
 {
 #ifndef CONFIG_BLK_DEV_SR_VENDOR
@@ -134,7 +136,7 @@
 	modesel->density = density;
 	modesel->block_length_med = (blocklength >> 8) & 0xff;
 	modesel->block_length_lo = blocklength & 0xff;
-	if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE, NULL))) {
+	if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE, NULL, VENDOR_TIMEOUT))) {
 		scsi_CDs[minor].device->sector_size = blocklength;
 	}
 #ifdef DEBUG
@@ -179,7 +181,7 @@
 		         (scsi_CDs[minor].device->lun << 5) : 0;
 		cmd[8] = 12;
 		cmd[9] = 0x40;
-		rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
+		rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
 		if (rc != 0)
 			break;
 		if ((buffer[0] << 8) + buffer[1] < 0x0a) {
@@ -205,7 +207,7 @@
 			         (scsi_CDs[minor].device->lun << 5) : 0;
 			cmd[1] |= 0x03;
 			cmd[2] = 0xb0;
-			rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ, NULL);
+			rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
 			if (rc != 0)
 				break;
 			if (buffer[14] != 0 && buffer[14] != 0xb0) {
@@ -231,7 +233,7 @@
 			cmd[1] = (scsi_CDs[minor].device->scsi_level <= SCSI_2) ?
 			         (scsi_CDs[minor].device->lun << 5) : 0;
 			cmd[1] |= 0x03;
-			rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ, NULL);
+			rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
 			if (rc == -EINVAL) {
 				printk(KERN_INFO "sr%d: Hmm, seems the drive "
 				       "doesn't support multisession CD's\n", minor);
@@ -257,7 +259,7 @@
 		         (scsi_CDs[minor].device->lun << 5) : 0;
 		cmd[8] = 0x04;
 		cmd[9] = 0x40;
-		rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ, NULL);
+		rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
 		if (rc != 0) {
 			break;
 		}
@@ -272,7 +274,7 @@
 		cmd[6] = rc & 0x7f;	/* number of last session */
 		cmd[8] = 0x0c;
 		cmd[9] = 0x40;
-		rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL);
+		rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ, NULL, VENDOR_TIMEOUT);
 		if (rc != 0) {
 			break;
 		}
diff -u -r -N ../../linus/2.4/linux/fs/udf/balloc.c linux/fs/udf/balloc.c
--- ../../linus/2.4/linux/fs/udf/balloc.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/balloc.c	Thu Aug  8 20:44:32 2002
@@ -461,8 +461,7 @@
 	elen = 0;
 	obloc = nbloc = UDF_I_LOCATION(table);
 
-	obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0));
-	atomic_inc(&nbh->b_count);
+	obh = nbh = NULL;
 
 	while (count && (etype =
 		udf_next_aext(table, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
@@ -506,7 +505,7 @@
 			udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
 		}
 
-		if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
+		if (nbh != obh)
 		{
 			i = -1;
 			obloc = nbloc;
@@ -580,7 +579,10 @@
 			{
 				loffset = nextoffset;
 				aed->lengthAllocDescs = cpu_to_le32(adsize);
-				sptr = (obh)->b_data + nextoffset - adsize;
+				if (obh)
+					sptr = obh->b_data + nextoffset - adsize;
+				else
+					sptr = UDF_I_DATA(inode) + nextoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode) - adsize;
 				dptr = nbh->b_data + sizeof(struct allocExtDesc);
 				memcpy(dptr, sptr, adsize);
 				nextoffset = sizeof(struct allocExtDesc) + adsize;
@@ -591,8 +593,8 @@
 				aed->lengthAllocDescs = cpu_to_le32(0);
 				sptr = (obh)->b_data + nextoffset;
 				nextoffset = sizeof(struct allocExtDesc);
-	
-				if (memcmp(&UDF_I_LOCATION(table), &obloc, sizeof(lb_addr)))
+
+				if (obh)
 				{
 					aed = (struct allocExtDesc *)(obh)->b_data;
 					aed->lengthAllocDescs =
@@ -631,15 +633,20 @@
 					break;
 				}
 			}
-			udf_update_tag(obh->b_data, loffset);
-			mark_buffer_dirty(obh);
+			if (obh)
+			{
+				udf_update_tag(obh->b_data, loffset);
+				mark_buffer_dirty(obh);
+			}
+			else
+				mark_inode_dirty(table);
 		}
 
 		if (elen) /* It's possible that stealing the block emptied the extent */
 		{
 			udf_write_aext(table, nbloc, &nextoffset, eloc, elen, nbh, 1);
 
-			if (!memcmp(&UDF_I_LOCATION(table), &nbloc, sizeof(lb_addr)))
+			if (!nbh)
 			{
 				UDF_I_LENALLOC(table) += adsize;
 				mark_inode_dirty(table);
@@ -690,7 +697,7 @@
 	extoffset = sizeof(struct unallocSpaceEntry);
 	bloc = UDF_I_LOCATION(table);
 
-	bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
+	bh = NULL;
 	eloc.logicalBlockNum = 0xFFFFFFFF;
 
 	while (first_block != eloc.logicalBlockNum && (etype =
@@ -768,8 +775,7 @@
 	extoffset = sizeof(struct unallocSpaceEntry);
 	bloc = UDF_I_LOCATION(table);
 
-	goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
-	atomic_inc(&goal_bh->b_count);
+	goal_bh = bh = NULL;
 
 	while (spread && (etype =
 		udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
diff -u -r -N ../../linus/2.4/linux/fs/udf/dir.c linux/fs/udf/dir.c
--- ../../linus/2.4/linux/fs/udf/dir.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/dir.c	Thu Aug  8 20:44:32 2002
@@ -122,7 +122,9 @@
 		nf_pos = (udf_ext0_offset(dir) >> 2);
 
 	fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
-	if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		fibh.sbh = fibh.ebh = NULL;
+	else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
 		&bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
 	{
 		offset >>= dir->i_sb->s_blocksize_bits;
@@ -136,40 +138,40 @@
 		}
 		else
 			offset = 0;
-	}
-	else
-	{
-		udf_release_data(bh);
-		return -ENOENT;
-	}
-
-	if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
-	{
-		udf_release_data(bh);
-		return -EIO;
-	}
 
-	if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
-	{
-		i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
-		if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
-			i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
-		for (num=0; i>0; i--)
+		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
 		{
-			block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
-			tmp = udf_tgetblk(dir->i_sb, block);
-			if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
-				bha[num++] = tmp;
-			else
-				brelse(tmp);
+			udf_release_data(bh);
+			return -EIO;
 		}
-		if (num)
+	
+		if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9))-1)))
 		{
-			ll_rw_block(READA, num, bha);
-			for (i=0; i<num; i++)
-				brelse(bha[i]);
+			i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
+			if (i+offset > (elen >> dir->i_sb->s_blocksize_bits))
+				i = (elen >> dir->i_sb->s_blocksize_bits)-offset;
+			for (num=0; i>0; i--)
+			{
+				block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
+				tmp = udf_tgetblk(dir->i_sb, block);
+				if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
+					bha[num++] = tmp;
+				else
+					brelse(tmp);
+			}
+			if (num)
+			{
+				ll_rw_block(READA, num, bha);
+				for (i=0; i<num; i++)
+					brelse(bha[i]);
+			}
 		}
 	}
+	else
+	{
+		udf_release_data(bh);
+		return -ENOENT;
+	}
 
 	while ( nf_pos < size )
 	{
diff -u -r -N ../../linus/2.4/linux/fs/udf/directory.c linux/fs/udf/directory.c
--- ../../linus/2.4/linux/fs/udf/directory.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/directory.c	Thu Aug  8 20:44:32 2002
@@ -17,6 +17,7 @@
  */
 
 #include "udfdecl.h"
+#include "udf_i.h"
 
 #include <linux/fs.h>
 #include <linux/string.h>
@@ -84,6 +85,21 @@
 
 	fibh->soffset = fibh->eoffset;
 
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+	{
+		fi = udf_get_fileident(UDF_I_DATA(dir) - udf_file_entry_alloc_offset(dir),
+			dir->i_sb->s_blocksize, &(fibh->eoffset));
+
+		if (!fi)
+			return NULL;
+
+		*nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
+
+		memcpy((uint8_t *)cfi, (uint8_t *)fi, sizeof(struct fileIdentDesc));
+
+		return fi;
+	}
+
 	if (fibh->eoffset == dir->i_sb->s_blocksize)
 	{
 		int lextoffset = *extoffset;
@@ -275,53 +291,43 @@
 }
 
 short_ad *
-udf_get_fileshortad(void * buffer, int maxoffset, int *offset, int inc)
+udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, int inc)
 {
-	short_ad * sa;
-	uint8_t * ptr;
+	short_ad *sa;
 
-	if ( (!buffer) || (!offset) )
+	if ( (!ptr) || (!offset) )
 	{
 		printk(KERN_ERR "udf: udf_get_fileshortad() invalidparms\n");
 		return NULL;
 	}
 
-	ptr = (uint8_t *)buffer;
-
-	if ( (*offset > 0) && (*offset < maxoffset) )
-		ptr += *offset;
-	else
+	if ( (*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset) )
 		return NULL;
-
-	if ((sa = (short_ad *)ptr)->extLength == 0)
+	else if ((sa = (short_ad *)ptr)->extLength == 0)
 		return NULL;
-	else if (inc)
-		(*offset) += sizeof(short_ad);
+
+	if (inc)
+		*offset += sizeof(short_ad);
 	return sa;
 }
 
 long_ad *
-udf_get_filelongad(void * buffer, int maxoffset, int * offset, int inc)
+udf_get_filelongad(uint8_t *ptr, int maxoffset, int * offset, int inc)
 {
-	long_ad * la;
-	uint8_t * ptr;
+	long_ad *la;
 
-	if ( (!buffer) || !(offset) ) 
+	if ( (!ptr) || (!offset) ) 
 	{
 		printk(KERN_ERR "udf: udf_get_filelongad() invalidparms\n");
 		return NULL;
 	}
 
-	ptr = (uint8_t *)buffer;
-
-	if ( (*offset > 0) && (*offset < maxoffset) )
-		ptr += *offset;
-	else
+	if ( (*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset) )
 		return NULL;
-
-	if ((la = (long_ad *)ptr)->extLength == 0)
+	else if ((la = (long_ad *)ptr)->extLength == 0)
 		return NULL;
-	else if (inc)
-		(*offset) += sizeof(long_ad);
+
+	if (inc)
+		*offset += sizeof(long_ad);
 	return la;
 }
diff -u -r -N ../../linus/2.4/linux/fs/udf/ecma_167.h linux/fs/udf/ecma_167.h
--- ../../linus/2.4/linux/fs/udf/ecma_167.h	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/ecma_167.h	Tue Aug  6 21:23:58 2002
@@ -606,7 +606,7 @@
 #define FE_RECORD_FMT_CRLF		0x0A
 #define FE_RECORD_FMT_LFCR		0x0B
 
-#define Record Display Attributes (ECMA 167r3 4/14.9.8) */
+/*  Record Display Attributes (ECMA 167r3 4/14.9.8) */
 #define FE_RECORD_DISPLAY_ATTR_UNDEF	0x00
 #define FE_RECORD_DISPLAY_ATTR_1	0x01
 #define FE_RECORD_DISPLAY_ATTR_2	0x02
diff -u -r -N ../../linus/2.4/linux/fs/udf/file.c linux/fs/udf/file.c
--- ../../linus/2.4/linux/fs/udf/file.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/file.c	Thu Aug  8 20:44:32 2002
@@ -46,64 +46,36 @@
 static int udf_adinicb_readpage(struct file *file, struct page * page)
 {
 	struct inode *inode = page->mapping->host;
-
-	struct buffer_head *bh;
-	int block;
 	char *kaddr;
-	int err = 0;
 
 	if (!PageLocked(page))
 		PAGE_BUG(page);
 
 	kaddr = kmap(page);
 	memset(kaddr, 0, PAGE_CACHE_SIZE);
-	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-	bh = sb_bread(inode->i_sb, block);
-	if (!bh)
-	{
-		SetPageError(page);
-		err = -EIO;
-		goto out;
-	}
-	memcpy(kaddr, bh->b_data + udf_ext0_offset(inode), inode->i_size);
-	brelse(bh);
+	memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), inode->i_size);
 	flush_dcache_page(page);
 	SetPageUptodate(page);
-out:
 	kunmap(page);
 	UnlockPage(page);
-	return err;
+	return 0;
 }
 
 static int udf_adinicb_writepage(struct page *page)
 {
 	struct inode *inode = page->mapping->host;
-
-	struct buffer_head *bh;
-	int block;
 	char *kaddr;
-	int err = 0;
 
 	if (!PageLocked(page))
 		PAGE_BUG(page);
 
 	kaddr = kmap(page);
-	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-	bh = sb_bread(inode->i_sb, block);
-	if (!bh)
-	{
-		SetPageError(page);
-		err = -EIO;
-		goto out;
-	}
-	memcpy(bh->b_data + udf_ext0_offset(inode), kaddr, inode->i_size);
-	mark_buffer_dirty(bh);
-	brelse(bh);
+	memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), kaddr, inode->i_size);
+	mark_inode_dirty(inode);
 	SetPageUptodate(page);
-out:
 	kunmap(page);
 	UnlockPage(page);
-	return err;
+	return 0;
 }
 
 static int udf_adinicb_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
@@ -115,31 +87,17 @@
 static int udf_adinicb_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
-
-	struct buffer_head *bh;
-	int block;
 	char *kaddr = page_address(page);
-	int err = 0;
 
-	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-	bh = sb_bread(inode->i_sb, block);
-	if (!bh)
-	{
-		SetPageError(page);
-		err = -EIO;
-		goto out;
-	}
-	memcpy(bh->b_data + udf_file_entry_alloc_offset(inode) + offset,
+	memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset,
 		kaddr + offset, to - offset);
-	mark_buffer_dirty(bh);
-	brelse(bh);
+	mark_inode_dirty(inode);
 	SetPageUptodate(page);
-out:
 	kunmap(page);
 	/* only one page here */
 	if (to > inode->i_size)
 		inode->i_size = to;
-	return err;
+	return 0;
 }
 
 struct address_space_operations udf_adinicb_aops = {
@@ -231,9 +189,6 @@
 	unsigned long arg)
 {
 	int result = -EINVAL;
-	struct buffer_head *bh = NULL;
-	long_ad eaicb;
-	uint8_t *ea = NULL;
 
 	if ( permission(inode, MAY_READ) != 0 )
 	{
@@ -248,7 +203,6 @@
 		return -EINVAL;
 	}
 
-	/* first, do ioctls that don't need to udf_read */
 	switch (cmd)
 	{
 		case UDF_GETVOLIDENT:
@@ -266,50 +220,16 @@
 
 			return result;
 		}
-	}
-
-	/* ok, we need to read the inode */
-	bh = udf_tread(inode->i_sb,
-		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
-
-	if (!bh)
-	{
-		udf_debug("bread failed (inode=%ld)\n", inode->i_ino);
-		return -EIO;
-	}
-
-	if (UDF_I_EXTENDED_FE(inode) == 0)
-	{
-		struct fileEntry *fe;
-
-		fe = (struct fileEntry *)bh->b_data;
-		eaicb = lela_to_cpu(fe->extendedAttrICB);
-		if (UDF_I_LENEATTR(inode))
-			ea = fe->extendedAttr;
-	}
-	else
-	{
-		struct extendedFileEntry *efe;
-
-		efe = (struct extendedFileEntry *)bh->b_data;
-		eaicb = lela_to_cpu(efe->extendedAttrICB);
-		if (UDF_I_LENEATTR(inode))
-			ea = efe->extendedAttr;
-	}
-
-	switch (cmd) 
-	{
 		case UDF_GETEASIZE:
 			result = put_user(UDF_I_LENEATTR(inode), (int *)arg);
 			break;
 
 		case UDF_GETEABLOCK:
-			result = copy_to_user((char *)arg, ea,
+			result = copy_to_user((char *)arg, UDF_I_DATA(inode),
 				UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
 			break;
 	}
 
-	udf_release_data(bh);
 	return result;
 }
 
diff -u -r -N ../../linus/2.4/linux/fs/udf/ialloc.c linux/fs/udf/ialloc.c
--- ../../linus/2.4/linux/fs/udf/ialloc.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/ialloc.c	Thu Aug  8 20:44:32 2002
@@ -28,6 +28,7 @@
 #include <linux/locks.h>
 #include <linux/quotaops.h>
 #include <linux/udf_fs.h>
+#include <linux/slab.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -130,13 +131,20 @@
 	inode->i_blocks = 0;
 	UDF_I_LENEATTR(inode) = 0;
 	UDF_I_LENALLOC(inode) = 0;
+	UDF_I_USE(inode) = 0;
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE))
 	{
-		UDF_I_EXTENDED_FE(inode) = 1;
+		UDF_I_EFE(inode) = 1;
 		UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
+		memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
 	}
 	else
-		UDF_I_EXTENDED_FE(inode) = 0;
+	{
+		UDF_I_EFE(inode) = 0;
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
+		memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+	}
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
 	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
@@ -147,7 +155,6 @@
 		UDF_I_CRTIME(inode) = CURRENT_TIME;
 	UDF_I_UMTIME(inode) = UDF_I_UCTIME(inode) =
 		UDF_I_UCRTIME(inode) = CURRENT_UTIME;
-	UDF_I_NEW_INODE(inode) = 1;
 	insert_inode_hash(inode);
 	mark_inode_dirty(inode);
 
diff -u -r -N ../../linus/2.4/linux/fs/udf/inode.c linux/fs/udf/inode.c
--- ../../linus/2.4/linux/fs/udf/inode.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/inode.c	Thu Aug  8 20:44:32 2002
@@ -38,6 +38,7 @@
 #include <linux/mm.h>
 #include <linux/smp_lock.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -122,6 +123,11 @@
 	clear_inode(inode);
 }
 
+void udf_clear_inode(struct inode *inode)
+{
+	kfree(UDF_I_DATA(inode));
+}
+
 void udf_discard_prealloc(struct inode * inode)
 {
 	if (inode->i_size && inode->i_size != UDF_I_LENEXTENTS(inode) &&
@@ -162,10 +168,8 @@
 
 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
 {
-	struct buffer_head *bh = NULL;
 	struct page *page;
 	char *kaddr;
-	int block;
 
 	/* from now on we have normal address_space methods */
 	inode->i_data.a_ops = &udf_aops;
@@ -180,10 +184,6 @@
 		return;
 	}
 
-	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-	bh = udf_tread(inode->i_sb, block);
-	if (!bh)
-		return;
 	page = grab_cache_page(inode->i_mapping, 0);
 	if (!PageLocked(page))
 		PAGE_BUG(page);
@@ -192,21 +192,19 @@
 		kaddr = kmap(page);
 		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
 			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
-		memcpy(kaddr, bh->b_data + udf_file_entry_alloc_offset(inode),
+		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
 			UDF_I_LENALLOC(inode));
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 		kunmap(page);
 	}
-	memset(bh->b_data + udf_file_entry_alloc_offset(inode),
-		0, UDF_I_LENALLOC(inode));
+	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
+		UDF_I_LENALLOC(inode));
 	UDF_I_LENALLOC(inode) = 0;
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
 	else
 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
-	mark_buffer_dirty_inode(bh, inode);
-	udf_release_data(bh);
 
 	inode->i_data.a_ops->writepage(page);
 	page_cache_release(page);
@@ -221,18 +219,21 @@
 	struct buffer_head *sbh = NULL, *dbh = NULL;
 	lb_addr bloc, eloc;
 	uint32_t elen, extoffset;
+	uint8_t alloctype;
 
 	struct udf_fileident_bh sfibh, dfibh;
 	loff_t f_pos = udf_ext0_offset(inode) >> 2;
 	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
 	struct fileIdentDesc cfi, *sfi, *dfi;
 
+	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
+		alloctype = ICBTAG_FLAG_AD_SHORT;
+	else
+		alloctype = ICBTAG_FLAG_AD_LONG;
+
 	if (!inode->i_size)
 	{
-		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
-			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
-		else
-			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
+		UDF_I_ALLOCTYPE(inode) = alloctype;
 		mark_inode_dirty(inode);
 		return NULL;
 	}
@@ -248,9 +249,6 @@
 		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
 	if (!newblock)
 		return NULL;
-	sbh = udf_tread(inode->i_sb, inode->i_ino);
-	if (!sbh)
-		return NULL;
 	dbh = udf_tgetblk(inode->i_sb, newblock);
 	if (!dbh)
 		return NULL;
@@ -261,18 +259,19 @@
 	mark_buffer_dirty_inode(dbh, inode);
 
 	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
-	sfibh.sbh = sfibh.ebh = sbh;
+	sbh = sfibh.sbh = sfibh.ebh = NULL;
 	dfibh.soffset = dfibh.eoffset = 0;
 	dfibh.sbh = dfibh.ebh = dbh;
 	while ( (f_pos < size) )
 	{
+		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
 		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
 		if (!sfi)
 		{
-			udf_release_data(sbh);
 			udf_release_data(dbh);
 			return NULL;
 		}
+		UDF_I_ALLOCTYPE(inode) = alloctype;
 		sfi->descTag.tagLocation = *block;
 		dfibh.soffset = dfibh.eoffset;
 		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
@@ -280,21 +279,15 @@
 		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
 			sfi->fileIdent + sfi->lengthOfImpUse))
 		{
-			udf_release_data(sbh);
+			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
 			udf_release_data(dbh);
 			return NULL;
 		}
 	}
 	mark_buffer_dirty_inode(dbh, inode);
 
-	memset(sbh->b_data + udf_file_entry_alloc_offset(inode),
-		0, UDF_I_LENALLOC(inode));
-
+	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
 	UDF_I_LENALLOC(inode) = 0;
-	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
-		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
-	else
-		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
 	bloc = UDF_I_LOCATION(inode);
 	eloc.logicalBlockNum = *block;
 	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
@@ -304,7 +297,6 @@
 	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
 	/* UniqueID stuff */
 
-	mark_buffer_dirty(sbh);
 	udf_release_data(sbh);
 	mark_inode_dirty(inode);
 	inode->i_version ++;
@@ -732,7 +724,7 @@
 
 				if (elen > numalloc)
 				{
-					laarr[c].extLength -=
+					laarr[i].extLength -=
 						(numalloc << inode->i_sb->s_blocksize_bits);
 					numalloc = 0;
 				}
@@ -854,7 +846,6 @@
 void udf_truncate(struct inode * inode)
 {
 	int offset;
-	struct buffer_head *bh;
 	int err;
 
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
@@ -879,16 +870,8 @@
 		}
 		else
 		{
-			offset = (inode->i_size & (inode->i_sb->s_blocksize - 1)) +
-				udf_file_entry_alloc_offset(inode);
-
-			if ((bh = udf_tread(inode->i_sb,
-				udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0))))
-			{
-				memset(bh->b_data + offset, 0x00, inode->i_sb->s_blocksize - offset);
-				mark_buffer_dirty(bh);
-				udf_release_data(bh);
-			}
+			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
+			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
 			UDF_I_LENALLOC(inode) = inode->i_size;
 		}
 	}
@@ -1037,7 +1020,6 @@
 	int offset, alen;
 
 	inode->i_version = ++event;
-	UDF_I_NEW_INODE(inode) = 0;
 
 	fe = (struct fileEntry *)bh->b_data;
 	efe = (struct extendedFileEntry *)bh->b_data;
@@ -1049,14 +1031,28 @@
 
 	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
 	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
-		UDF_I_EXTENDED_FE(inode) = 1;
+	{
+		UDF_I_EFE(inode) = 1;
+		UDF_I_USE(inode) = 0;
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
+		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
+	}
 	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
-		UDF_I_EXTENDED_FE(inode) = 0;
+	{
+		UDF_I_EFE(inode) = 0;
+		UDF_I_USE(inode) = 0;
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
+		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+	}
 	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
 	{
+		UDF_I_EFE(inode) = 0;
+		UDF_I_USE(inode) = 1;
 		UDF_I_LENALLOC(inode) =
 			le32_to_cpu(
 				((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
+		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
+		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
 		return;
 	}
 
@@ -1079,7 +1075,7 @@
 	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
 	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
 
-	if (UDF_I_EXTENDED_FE(inode) == 0)
+	if (UDF_I_EFE(inode) == 0)
 	{
 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
 			(inode->i_sb->s_blocksize_bits - 9);
@@ -1325,19 +1321,11 @@
 		udf_debug("bread failure\n");
 		return -EIO;
 	}
+
+	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
+
 	fe = (struct fileEntry *)bh->b_data;
 	efe = (struct extendedFileEntry *)bh->b_data;
-	if (UDF_I_NEW_INODE(inode) == 1)
-	{
-		if (UDF_I_EXTENDED_FE(inode) == 0)
-			memset(bh->b_data, 0x00, sizeof(struct fileEntry));
-		else
-			memset(bh->b_data, 0x00, sizeof(struct extendedFileEntry));
-		memset(bh->b_data + udf_file_entry_alloc_offset(inode) +
-			UDF_I_LENALLOC(inode), 0x0, inode->i_sb->s_blocksize -
-			udf_file_entry_alloc_offset(inode) - UDF_I_LENALLOC(inode));
-		UDF_I_NEW_INODE(inode) = 0;
-	}
 
 	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
 	{
@@ -1345,6 +1333,7 @@
 			(struct unallocSpaceEntry *)bh->b_data;
 
 		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
+		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
 		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
 			sizeof(tag);
 		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
@@ -1415,8 +1404,9 @@
 		udf_release_data(tbh);
 	}
 
-	if (UDF_I_EXTENDED_FE(inode) == 0)
+	if (UDF_I_EFE(inode) == 0)
 	{
+		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
 		fe->logicalBlocksRecorded = cpu_to_le64(
 			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
 			(inode->i_sb->s_blocksize_bits - 9));
@@ -1439,6 +1429,7 @@
 	}
 	else
 	{
+		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
 		efe->objectSize = cpu_to_le64(inode->i_size);
 		efe->logicalBlocksRecorded = cpu_to_le64(
 			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
@@ -1619,17 +1610,12 @@
 	long_ad *lad = NULL;
 	struct allocExtDesc *aed;
 	int8_t etype;
+	uint8_t *ptr;
 
-	if (!(*bh))
-	{
-		if (!(*bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
-		{
-			udf_debug("reading block %d failed!\n",
-				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
-			return -1;
-		}
-	}
+	if (!*bh)
+		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
+	else
+		ptr = (*bh)->b_data + *extoffset;
 
 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
 		adsize = sizeof(short_ad);
@@ -1668,7 +1654,7 @@
 		{
 			loffset = *extoffset;
 			aed->lengthAllocDescs = cpu_to_le32(adsize);
-			sptr = (*bh)->b_data + *extoffset - adsize;
+			sptr = ptr - adsize;
 			dptr = nbh->b_data + sizeof(struct allocExtDesc);
 			memcpy(dptr, sptr, adsize);
 			*extoffset = sizeof(struct allocExtDesc) + adsize;
@@ -1677,10 +1663,10 @@
 		{
 			loffset = *extoffset + adsize;
 			aed->lengthAllocDescs = cpu_to_le32(0);
-			sptr = (*bh)->b_data + *extoffset;
+			sptr = ptr;
 			*extoffset = sizeof(struct allocExtDesc);
 
-			if (memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
+			if (*bh)
 			{
 				aed = (struct allocExtDesc *)(*bh)->b_data;
 				aed->lengthAllocDescs =
@@ -1720,18 +1706,23 @@
 				break;
 			}
 		}
-		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
-			udf_update_tag((*bh)->b_data, loffset);
+		if (*bh)
+		{
+			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
+				udf_update_tag((*bh)->b_data, loffset);
+			else
+				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
+			mark_buffer_dirty_inode(*bh, inode);
+			udf_release_data(*bh);
+		}
 		else
-			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
-		mark_buffer_dirty_inode(*bh, inode);
-		udf_release_data(*bh);
+			mark_inode_dirty(inode);
 		*bh = nbh;
 	}
 
 	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
 
-	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
+	if (!*bh)
 	{
 		UDF_I_LENALLOC(inode) += adsize;
 		mark_inode_dirty(inode);
@@ -1755,49 +1746,40 @@
     lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
 {
 	int adsize;
-	short_ad *sad = NULL;
-	long_ad *lad = NULL;
+	uint8_t *ptr;
 
-	if (!(bh))
-	{
-		if (!(bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, bloc, 0))))
-		{
-			udf_debug("reading block %d failed!\n",
-				udf_get_lb_pblock(inode->i_sb, bloc, 0));
-			return -1;
-		}
-	}
+	if (!bh)
+		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
 	else
+	{
+		ptr = bh->b_data + *extoffset;
 		atomic_inc(&bh->b_count);
-
-	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
-		adsize = sizeof(short_ad);
-	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
-		adsize = sizeof(long_ad);
-	else
-		return -1;
+	}
 
 	switch (UDF_I_ALLOCTYPE(inode))
 	{
 		case ICBTAG_FLAG_AD_SHORT:
 		{
-			sad = (short_ad *)((bh)->b_data + *extoffset);
+			short_ad *sad = (short_ad *)ptr;
 			sad->extLength = cpu_to_le32(elen);
 			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
+			adsize = sizeof(short_ad);
 			break;
 		}
 		case ICBTAG_FLAG_AD_LONG:
 		{
-			lad = (long_ad *)((bh)->b_data + *extoffset);
+			long_ad *lad = (long_ad *)ptr;
 			lad->extLength = cpu_to_le32(elen);
 			lad->extLocation = cpu_to_lelb(eloc);
 			memset(lad->impUse, 0x00, sizeof(lad->impUse));
+			adsize = sizeof(long_ad);
 			break;
 		}
+		default:
+			return -1;
 	}
 
-	if (memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
+	if (bh)
 	{
 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
 		{
@@ -1806,30 +1788,28 @@
 				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
 		}
 		mark_buffer_dirty_inode(bh, inode);
+		udf_release_data(bh);
 	}
 	else
-	{
 		mark_inode_dirty(inode);
-		mark_buffer_dirty(bh);
-	}
 
 	if (inc)
 		*extoffset += adsize;
-	udf_release_data(bh);
 	return (elen >> 30);
 }
 
 int8_t udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
 	lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
 {
-	uint16_t tagIdent;
-	int pos, alen;
 	int8_t etype;
 
-	if (!(*bh))
+	while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
+		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
 	{
-		if (!(*bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
+		*bloc = *eloc;
+		*extoffset = sizeof(struct allocExtDesc);
+		udf_release_data(*bh);
+		if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
 		{
 			udf_debug("reading block %d failed!\n",
 				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -1837,154 +1817,38 @@
 		}
 	}
 
-	tagIdent = le16_to_cpu(((tag *)(*bh)->b_data)->tagIdent);
-
-	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
-	{
-		if (tagIdent == TAG_IDENT_FE || tagIdent == TAG_IDENT_EFE ||
-			UDF_I_NEW_INODE(inode))
-		{
-			pos = udf_file_entry_alloc_offset(inode);
-			alen = UDF_I_LENALLOC(inode) + pos;
-		}
-		else if (tagIdent == TAG_IDENT_USE)
-		{
-			pos = sizeof(struct unallocSpaceEntry);
-			alen = UDF_I_LENALLOC(inode) + pos;
-		}
-		else
-			return -1;
-	}
-	else if (tagIdent == TAG_IDENT_AED)
-	{
-		struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
-
-		pos = sizeof(struct allocExtDesc);
-		alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
-	}
-	else
-		return -1;
-
-	if (!(*extoffset))
-		*extoffset = pos;
-
-	switch (UDF_I_ALLOCTYPE(inode))
-	{
-		case ICBTAG_FLAG_AD_SHORT:
-		{
-			short_ad *sad;
-
-			if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
-				return -1;
-
-			if ((etype = le32_to_cpu(sad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
-			{
-				bloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
-				*extoffset = 0;
-				udf_release_data(*bh);
-				*bh = NULL;
-				return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
-			}
-			else
-			{
-				eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
-				eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
-				*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
-			}
-			break;
-		}
-		case ICBTAG_FLAG_AD_LONG:
-		{
-			long_ad *lad;
-
-			if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
-				return -1;
-
-			if ((etype = le32_to_cpu(lad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
-			{
-				*bloc = lelb_to_cpu(lad->extLocation);
-				*extoffset = 0;
-				udf_release_data(*bh);
-				*bh = NULL;
-				return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
-			}
-			else
-			{
-				*eloc = lelb_to_cpu(lad->extLocation);
-				*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
-			}
-			break;
-		}
-		case ICBTAG_FLAG_AD_IN_ICB:
-		{
-			if (UDF_I_LENALLOC(inode) == 0)
-				return -1;
-			etype = (EXT_RECORDED_ALLOCATED >> 30);
-			*eloc = UDF_I_LOCATION(inode);
-			*elen = UDF_I_LENALLOC(inode);
-			break;
-		}
-		default:
-		{
-			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
-			return -1;
-		}
-	}
-	if (*elen)
-		return etype;
-
-	udf_debug("Empty Extent, inode=%ld, alloctype=%d, eloc=%d, elen=%d, etype=%d, extoffset=%d\n",
-		inode->i_ino, UDF_I_ALLOCTYPE(inode), eloc->logicalBlockNum, *elen, etype, *extoffset);
-	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
-		*extoffset -= sizeof(short_ad);
-	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
-		*extoffset -= sizeof(long_ad);
-	return -1;
+	return etype;
 }
 
 int8_t udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
 	lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
 {
-	int pos, alen;
+	int alen;
 	int8_t etype;
+	uint8_t *ptr;
 
-	if (!(*bh))
+	if (!*bh)
 	{
-		if (!(*bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
-		{
-			udf_debug("reading block %d failed!\n",
-				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
-			return -1;
-		}
-	}
-
-	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
-	{
-		if (!(UDF_I_EXTENDED_FE(inode)))
-			pos = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
-		else
-			pos = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
-		alen = UDF_I_LENALLOC(inode) + pos;
+		if (!(*extoffset))
+			*extoffset = udf_file_entry_alloc_offset(inode);
+		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
+		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
 	}
 	else
 	{
-		struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
-
-		pos = sizeof(struct allocExtDesc);
-		alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
+		if (!(*extoffset))
+			*extoffset = sizeof(struct allocExtDesc);
+		ptr = (*bh)->b_data + *extoffset;
+		alen = le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
 	}
 
-	if (!(*extoffset))
-		*extoffset = pos;
-
 	switch (UDF_I_ALLOCTYPE(inode))
 	{
 		case ICBTAG_FLAG_AD_SHORT:
 		{
 			short_ad *sad;
 
-			if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
+			if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
 				return -1;
 
 			etype = le32_to_cpu(sad->extLength) >> 30;
@@ -1997,7 +1861,7 @@
 		{
 			long_ad *lad;
 
-			if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
+			if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
 				return -1;
 
 			etype = le32_to_cpu(lad->extLength) >> 30;
@@ -2011,15 +1875,8 @@
 			return -1;
 		}
 	}
-	if (*elen)
-		return etype;
 
-	udf_debug("Empty Extent!\n");
-	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
-		*extoffset -= sizeof(short_ad);
-	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
-		*extoffset -= sizeof(long_ad);
-	return -1;
+	return etype;
 }
 
 int8_t udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
@@ -2029,17 +1886,7 @@
 	uint32_t oelen;
 	int8_t etype;
 
-	if (!bh)
-	{
-		if (!(bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, bloc, 0))))
-		{
-			udf_debug("reading block %d failed!\n",
-				udf_get_lb_pblock(inode->i_sb, bloc, 0));
-			return -1;
-		}
-	}
-	else
+	if (bh)
 		atomic_inc(&bh->b_count);
 
 	while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
@@ -2063,19 +1910,11 @@
 	int8_t etype;
 	struct allocExtDesc *aed;
 
-	if (!(nbh))
+	if (nbh)
 	{
-		if (!(nbh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, nbloc, 0))))
-		{
-			udf_debug("reading block %d failed!\n",
-				udf_get_lb_pblock(inode->i_sb, nbloc, 0));
-			return -1;
-		}
-	}
-	else
 		atomic_inc(&nbh->b_count);
-	atomic_inc(&nbh->b_count);
+		atomic_inc(&nbh->b_count);
+	}
 
 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
 		adsize = sizeof(short_ad);
@@ -2094,7 +1933,7 @@
 	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
 	{
 		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
-		if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
+		if (obh != nbh)
 		{
 			obloc = nbloc;
 			udf_release_data(obh);
@@ -2106,12 +1945,12 @@
 	memset(&eloc, 0x00, sizeof(lb_addr));
 	elen = 0;
 
-	if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
+	if (nbh != obh)
 	{
 		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
 		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
 		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
-		if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
+		if (!obh)
 		{
 			UDF_I_LENALLOC(inode) -= (adsize * 2);
 			mark_inode_dirty(inode);
@@ -2131,7 +1970,7 @@
 	else
 	{
 		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
-		if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
+		if (!obh)
 		{
 			UDF_I_LENALLOC(inode) -= adsize;
 			mark_inode_dirty(inode);
@@ -2206,9 +2045,7 @@
 		ret = 0;
 
 	unlock_kernel();
-
-	if (bh)
-		udf_release_data(bh);
+	udf_release_data(bh);
 
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
 		return udf_fixed_to_variable(ret);
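
The inode.c changes above replace the old memcmp() against UDF_I_LOCATION(inode) with a simpler convention: a NULL buffer_head now means the allocation descriptors are embedded in the in-core inode data (UDF_I_DATA), while a non-NULL bh points into a separate allocation extent descriptor block. A minimal sketch of that addressing rule, mirroring the pointer arithmetic used in udf_write_aext() and udf_current_aext() above (the helper name is illustrative and not part of the patch):

static inline uint8_t *example_aext_ptr(struct inode *inode,
					 struct buffer_head *bh, int extoffset)
{
	if (!bh)
		/* descriptors embedded in the (extended) file entry */
		return UDF_I_DATA(inode) + extoffset
			- udf_file_entry_alloc_offset(inode)
			+ UDF_I_LENEATTR(inode);

	/* descriptors held in an allocation extent descriptor block */
	return bh->b_data + extoffset;
}
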
diff -u -r -N ../../linus/2.4/linux/fs/udf/misc.c linux/fs/udf/misc.c
--- ../../linus/2.4/linux/fs/udf/misc.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/misc.c	Thu Aug  8 20:44:32 2002
@@ -73,7 +73,7 @@
 
 	*bh = udf_tread(inode->i_sb, inode->i_ino);
 
-	if (UDF_I_EXTENDED_FE(inode) == 0)
+	if (UDF_I_EFE(inode) == 0)
 	{
 		struct fileEntry *fe;
 
@@ -189,7 +189,7 @@
 
 	*bh = udf_tread(inode->i_sb, inode->i_ino);
 
-	if (UDF_I_EXTENDED_FE(inode) == 0)
+	if (UDF_I_EFE(inode) == 0)
 	{
 		struct fileEntry *fe;
 
diff -u -r -N ../../linus/2.4/linux/fs/udf/namei.c linux/fs/udf/namei.c
--- ../../linus/2.4/linux/fs/udf/namei.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/namei.c	Thu Aug  8 20:44:32 2002
@@ -56,12 +56,16 @@
 	uint8_t lfi = cfi->lengthFileIdent;
 	int padlen = fibh->eoffset - fibh->soffset - liu - lfi -
 		sizeof(struct fileIdentDesc);
+	int adinicb = 0;
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
+		adinicb = 1;
 
 	offset = fibh->soffset + sizeof(struct fileIdentDesc);
 
 	if (impuse)
 	{
-		if (offset + liu < 0)
+		if (adinicb || (offset + liu < 0))
 			memcpy((uint8_t *)sfi->impUse, impuse, liu);
 		else if (offset >= 0)
 			memcpy(fibh->ebh->b_data + offset, impuse, liu);
@@ -76,7 +80,7 @@
 
 	if (fileident)
 	{
-		if (offset + lfi < 0)
+		if (adinicb || (offset + lfi < 0))
 			memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
 		else if (offset >= 0)
 			memcpy(fibh->ebh->b_data + offset, fileident, lfi);
@@ -89,7 +93,7 @@
 
 	offset += lfi;
 
-	if (offset + padlen < 0)
+	if (adinicb || (offset + padlen < 0))
 		memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
 	else if (offset >= 0)
 		memset(fibh->ebh->b_data + offset, 0x00, padlen);
@@ -123,7 +127,7 @@
 			checksum += ((uint8_t *)&cfi->descTag)[i];
 
 	cfi->descTag.tagChecksum = checksum;
-	if (sizeof(struct fileIdentDesc) <= -fibh->soffset)
+	if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset))
 		memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc));
 	else
 	{
@@ -132,9 +136,14 @@
 			sizeof(struct fileIdentDesc) + fibh->soffset);
 	}
 
-	if (fibh->sbh != fibh->ebh)
-		mark_buffer_dirty_inode(fibh->ebh, inode);
-	mark_buffer_dirty_inode(fibh->sbh, inode);
+	if (adinicb)
+		mark_inode_dirty(inode);
+	else
+	{
+		if (fibh->sbh != fibh->ebh)
+			mark_buffer_dirty_inode(fibh->ebh, inode);
+		mark_buffer_dirty_inode(fibh->sbh, inode);
+	}
 	return 0;
 }
 
@@ -161,7 +170,9 @@
 	f_pos = (udf_ext0_offset(dir) >> 2);
 
 	fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
-	if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		fibh->sbh = fibh->ebh = NULL;
+	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
 		&bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
 	{
 		offset >>= dir->i_sb->s_blocksize_bits;
@@ -175,6 +186,12 @@
 		}
 		else
 			offset = 0;
+
+		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
+		{
+			udf_release_data(bh);
+			return NULL;
+		}
 	}
 	else
 	{
@@ -182,12 +199,6 @@
 		return NULL;
 	}
 
-	if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
-	{
-		udf_release_data(bh);
-		return NULL;
-	}
-
 	while ( (f_pos < size) )
 	{
 		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
@@ -388,7 +399,9 @@
 	f_pos = (udf_ext0_offset(dir) >> 2);
 
 	fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
-	if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		fibh->sbh = fibh->ebh = NULL;
+	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
 		&bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
 	{
 		offset >>= dir->i_sb->s_blocksize_bits;
@@ -409,94 +422,89 @@
 			*err = -EIO;
 			return NULL;
 		}
-	
+
 		block = UDF_I_LOCATION(dir).logicalBlockNum;
-	
-		while ( (f_pos < size) )
+
+	}
+	else
+	{
+		block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
+		fibh->sbh = fibh->ebh = NULL;
+		fibh->soffset = fibh->eoffset = sb->s_blocksize;
+		goto add;
+	}
+
+	while ( (f_pos < size) )
+	{
+		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
+
+		if (!fi)
 		{
-			fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &bloc, &extoffset, &eloc, &elen, &offset, &bh);
-	
-			if (!fi)
-			{
-				if (fibh->sbh != fibh->ebh)
-					udf_release_data(fibh->ebh);
-				udf_release_data(fibh->sbh);
-				udf_release_data(bh);
-				*err = -EIO;
-				return NULL;
-			}
-	
-			liu = le16_to_cpu(cfi->lengthOfImpUse);
-			lfi = cfi->lengthFileIdent;
-	
-			if (fibh->sbh == fibh->ebh)
-				nameptr = fi->fileIdent + liu;
+			if (fibh->sbh != fibh->ebh)
+				udf_release_data(fibh->ebh);
+			udf_release_data(fibh->sbh);
+			udf_release_data(bh);
+			*err = -EIO;
+			return NULL;
+		}
+
+		liu = le16_to_cpu(cfi->lengthOfImpUse);
+		lfi = cfi->lengthFileIdent;
+
+		if (fibh->sbh == fibh->ebh)
+			nameptr = fi->fileIdent + liu;
+		else
+		{
+			int poffset;	/* Unpadded ending offset */
+
+			poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
+
+			if (poffset >= lfi)
+				nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
 			else
 			{
-				int poffset;	/* Unpaded ending offset */
-	
-				poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
-	
-				if (poffset >= lfi)
-					nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
-				else
-				{
-					nameptr = fname;
-					memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
-					memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
-				}
+				nameptr = fname;
+				memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
+				memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
 			}
-	
-			if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
+		}
+
+		if ( (cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0 )
+		{
+			if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
 			{
-				if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen)
+				udf_release_data(bh);
+				cfi->descTag.tagSerialNum = cpu_to_le16(1);
+				cfi->fileVersionNum = cpu_to_le16(1);
+				cfi->fileCharacteristics = 0;
+				cfi->lengthFileIdent = namelen;
+				cfi->lengthOfImpUse = cpu_to_le16(0);
+				if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
+					return fi;
+				else
 				{
-					udf_release_data(bh);
-					cfi->descTag.tagSerialNum = cpu_to_le16(1);
-					cfi->fileVersionNum = cpu_to_le16(1);
-					cfi->fileCharacteristics = 0;
-					cfi->lengthFileIdent = namelen;
-					cfi->lengthOfImpUse = cpu_to_le16(0);
-					if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name))
-						return fi;
-					else
-					{
-						*err = -EIO;
-						return NULL;
-					}
+					*err = -EIO;
+					return NULL;
 				}
 			}
+		}
 
-			if (!lfi || !dentry)
-				continue;
+		if (!lfi || !dentry)
+			continue;
 
-			if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
-				udf_match(flen, fname, &(dentry->d_name)))
-			{
-				if (fibh->sbh != fibh->ebh)
-					udf_release_data(fibh->ebh);
-				udf_release_data(fibh->sbh);
-				udf_release_data(bh);
-				*err = -EEXIST;
-				return NULL;
-			}
-		}
-	}
-	else
-	{
-		block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
-		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
+			udf_match(flen, fname, &(dentry->d_name)))
 		{
-			fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
-			fibh->soffset = fibh->eoffset = udf_file_entry_alloc_offset(dir);
-		}
-		else
-		{
-			fibh->sbh = fibh->ebh = NULL;
-			fibh->soffset = fibh->eoffset = sb->s_blocksize;
+			if (fibh->sbh != fibh->ebh)
+				udf_release_data(fibh->ebh);
+			udf_release_data(fibh->sbh);
+			udf_release_data(bh);
+			*err = -EEXIST;
+			return NULL;
 		}
 	}
 
+add:
 	f_pos += nfidlen;
 
 	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB &&
@@ -533,13 +541,17 @@
 			fibh->sbh = fibh->ebh;
 		}
 
-		if (UDF_I_ALLOCTYPE(dir) != ICBTAG_FLAG_AD_IN_ICB)
+		if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		{
+			block = UDF_I_LOCATION(dir).logicalBlockNum;
+			fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset - udf_ext0_offset(dir));
+		}
+		else
+		{
 			block = eloc.logicalBlockNum + ((elen - 1) >>
 				dir->i_sb->s_blocksize_bits);
-		else
-			block = UDF_I_LOCATION(dir).logicalBlockNum;
-				
-		fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
+			fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
+		}
 	}
 	else
 	{
@@ -784,7 +796,10 @@
 	f_pos = (udf_ext0_offset(dir) >> 2);
 
 	fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
-	if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
+
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+		fibh.sbh = fibh.ebh = NULL;
+	else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
 		&bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
 	{
 		offset >>= dir->i_sb->s_blocksize_bits;
@@ -798,6 +813,12 @@
 		}
 		else
 			offset = 0;
+
+		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
+		{
+			udf_release_data(bh);
+			return 0;
+		}
 	}
 	else
 	{
@@ -805,8 +826,6 @@
 		return 0;
 	}
 
-	if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
-		return 0;
 
 	while ( (f_pos < size) )
 	{
@@ -823,6 +842,9 @@
 
 		if (cfi.lengthFileIdent && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0)
 		{
+			if (fibh.sbh != fibh.ebh)
+				udf_release_data(fibh.ebh);
+			udf_release_data(fibh.sbh);
 			udf_release_data(bh);
 			return 0;
 		}
diff -u -r -N ../../linus/2.4/linux/fs/udf/super.c linux/fs/udf/super.c
--- ../../linus/2.4/linux/fs/udf/super.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/super.c	Thu Aug  8 20:44:32 2002
@@ -104,6 +104,7 @@
 	write_inode:		udf_write_inode,
 	put_inode:		udf_put_inode,
 	delete_inode:		udf_delete_inode,
+	clear_inode:		udf_clear_inode,
 	put_super:		udf_put_super,
 	write_super:		udf_write_super,
 	statfs:			udf_statfs,
@@ -313,10 +314,6 @@
 	UDF_SB(sb)->s_gid   = uopt.gid;
 	UDF_SB(sb)->s_umask = uopt.umask;
 
-#if UDFFS_RW != 1
-	*flags |= MS_RDONLY;
-#endif
-
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		return 0;
 	if (*flags & MS_RDONLY)
@@ -1373,10 +1370,6 @@
 
 	memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
 
-#if UDFFS_RW != 1
-	sb->s_flags |= MS_RDONLY;
-#endif
-
 	if (!udf_parse_options((char *)options, &uopt))
 		goto error_out;
 
@@ -1488,8 +1481,8 @@
 	{
 		timestamp ts;
 		udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb), 0);
-		udf_info("UDF %s-%s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
-			UDFFS_VERSION, UDFFS_RW ? "rw" : "ro", UDFFS_DATE,
+		udf_info("UDF %s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
+			UDFFS_VERSION, UDFFS_DATE,
 			UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute,
 			ts.typeAndTimezone);
 	}
diff -u -r -N ../../linus/2.4/linux/fs/udf/symlink.c linux/fs/udf/symlink.c
--- ../../linus/2.4/linux/fs/udf/symlink.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/symlink.c	Thu Aug  8 20:44:32 2002
@@ -87,14 +87,7 @@
 	
 	lock_kernel();
 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
-	{
-		bh = udf_tread(inode->i_sb, inode->i_ino);
-
-		if (!bh)
-			goto out;
-
-		symlink = bh->b_data + udf_file_entry_alloc_offset(inode);
-	}
+		symlink = UDF_I_DATA(inode) + UDF_I_LENALLOC(inode);
 	else
 	{
 		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
diff -u -r -N ../../linus/2.4/linux/fs/udf/truncate.c linux/fs/udf/truncate.c
--- ../../linus/2.4/linux/fs/udf/truncate.c	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/truncate.c	Thu Aug  8 20:44:32 2002
@@ -57,7 +57,9 @@
 		if (last_block - first_block > 0)
 		{
 			if (etype == (EXT_RECORDED_ALLOCATED >> 30))
+			{
 				mark_inode_dirty(inode);
+			}
 
 			if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
 				udf_free_blocks(inode->i_sb, inode, eloc, first_block, last_block - first_block);
@@ -94,7 +96,7 @@
 		else
 			lenalloc = extoffset - adsize;
 
-		if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
+		if (!bh)
 			lenalloc -= udf_file_entry_alloc_offset(inode);
 		else
 			lenalloc -= sizeof(struct allocExtDesc);
@@ -107,15 +109,15 @@
 				extoffset = 0;
 				if (lelen)
 				{
-					if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
-						memset(bh->b_data, 0x00, udf_file_entry_alloc_offset(inode));
+					if (!bh)
+						BUG();
 					else
 						memset(bh->b_data, 0x00, sizeof(struct allocExtDesc));
 					udf_free_blocks(inode->i_sb, inode, bloc, 0, lelen);
 				}
 				else
 				{
-					if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
+					if (!bh)
 					{
 						UDF_I_LENALLOC(inode) = lenalloc;
 						mark_inode_dirty(inode);
@@ -134,9 +136,9 @@
 				}
 
 				udf_release_data(bh);
-				bh = NULL;
-
+				extoffset = sizeof(struct allocExtDesc);
 				bloc = eloc;
+				bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, bloc, 0));
 				if (elen)
 					lelen = (elen + inode->i_sb->s_blocksize - 1) >>
 						inode->i_sb->s_blocksize_bits;
@@ -152,15 +154,15 @@
 
 		if (lelen)
 		{
-			if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
-				memset(bh->b_data, 0x00, udf_file_entry_alloc_offset(inode));
+			if (!bh)
+				BUG();
 			else
 				memset(bh->b_data, 0x00, sizeof(struct allocExtDesc));
 			udf_free_blocks(inode->i_sb, inode, bloc, 0, lelen);
 		}
 		else
 		{
-			if (!memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
+			if (!bh)
 			{
 				UDF_I_LENALLOC(inode) = lenalloc;
 				mark_inode_dirty(inode);
diff -u -r -N ../../linus/2.4/linux/fs/udf/udf_i.h linux/fs/udf/udf_i.h
--- ../../linus/2.4/linux/fs/udf/udf_i.h	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/udf_i.h	Thu Aug  8 20:44:32 2002
@@ -9,14 +9,17 @@
 #define UDF_I_LENEXTENTS(X)	( UDF_I(X)->i_lenExtents )
 #define UDF_I_UNIQUE(X)		( UDF_I(X)->i_unique )
 #define UDF_I_ALLOCTYPE(X)	( UDF_I(X)->i_alloc_type )
-#define UDF_I_EXTENDED_FE(X)	( UDF_I(X)->i_extended_fe )
-#define UDF_I_STRAT4096(X)	( UDF_I(X)->i_strat_4096 )
-#define UDF_I_NEW_INODE(X)	( UDF_I(X)->i_new_inode )
+#define UDF_I_EFE(X)		( UDF_I(X)->i_efe )
+#define UDF_I_USE(X)		( UDF_I(X)->i_use )
+#define UDF_I_STRAT4096(X)	( UDF_I(X)->i_strat4096 )
 #define UDF_I_NEXT_ALLOC_BLOCK(X)	( UDF_I(X)->i_next_alloc_block )
 #define UDF_I_NEXT_ALLOC_GOAL(X)	( UDF_I(X)->i_next_alloc_goal )
 #define UDF_I_UMTIME(X)		( UDF_I(X)->i_umtime )
 #define UDF_I_UCTIME(X)		( UDF_I(X)->i_uctime )
 #define UDF_I_CRTIME(X)		( UDF_I(X)->i_crtime )
 #define UDF_I_UCRTIME(X)	( UDF_I(X)->i_ucrtime )
+#define UDF_I_SAD(X)		( UDF_I(X)->i_ext.i_sad )
+#define UDF_I_LAD(X)		( UDF_I(X)->i_ext.i_lad )
+#define UDF_I_DATA(X)		( UDF_I(X)->i_ext.i_data )
 
 #endif /* !defined(_LINUX_UDF_I_H) */
diff -u -r -N ../../linus/2.4/linux/fs/udf/udfdecl.h linux/fs/udf/udfdecl.h
--- ../../linus/2.4/linux/fs/udf/udfdecl.h	Tue Aug  6 21:16:21 2002
+++ linux/fs/udf/udfdecl.h	Thu Aug  8 20:44:32 2002
@@ -34,9 +34,11 @@
 #define CURRENT_UTIME	(xtime.tv_usec)
 
 #define udf_file_entry_alloc_offset(inode)\
-	((UDF_I_EXTENDED_FE(inode) ?\
-		sizeof(struct extendedFileEntry) :\
-		sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode))
+	(UDF_I_USE(inode) ?\
+		sizeof(struct unallocSpaceEntry) :\
+		((UDF_I_EFE(inode) ?\
+			sizeof(struct extendedFileEntry) :\
+			sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode)))
 
 #define udf_ext0_offset(inode)\
 	(UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ?\
@@ -113,6 +115,7 @@
 extern void udf_read_inode(struct inode *);
 extern void udf_put_inode(struct inode *);
 extern void udf_delete_inode(struct inode *);
+extern void udf_clear_inode(struct inode *);
 extern void udf_write_inode(struct inode *, int);
 extern long udf_block_map(struct inode *, long);
 extern int8_t inode_bmap(struct inode *, int, lb_addr *, uint32_t *, lb_addr *, uint32_t *, uint32_t *, struct buffer_head **);
@@ -199,8 +202,8 @@
 /* directory.c */
 extern struct fileIdentDesc * udf_get_fileident(void * buffer, int bufsize, int * offset);
 extern extent_ad * udf_get_fileextent(void * buffer, int bufsize, int * offset);
-extern long_ad * udf_get_filelongad(void * buffer, int bufsize, int * offset, int);
-extern short_ad * udf_get_fileshortad(void * buffer, int bufsize, int * offset, int);
+extern long_ad * udf_get_filelongad(uint8_t *, int, int *, int);
+extern short_ad * udf_get_fileshortad(uint8_t *, int, int *, int);
 extern uint8_t * udf_get_filead(struct fileEntry *, uint8_t *, int, int, int, int *);
 
 #endif /* __UDF_DECL_H */
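
The reworked udf_file_entry_alloc_offset() macro above is easy to misparse because of the nested conditionals; written out as an equivalent function it reads as follows (a sketch only, using the same UDF_I_* accessors and on-disk struct names the macro itself relies on):

static inline int example_file_entry_alloc_offset(struct inode *inode)
{
	if (UDF_I_USE(inode))		/* unallocated space entry */
		return sizeof(struct unallocSpaceEntry);
	if (UDF_I_EFE(inode))		/* extended file entry */
		return sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
	return sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
}
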
diff -u -r -N ../../linus/2.4/linux/include/linux/cdrom.h linux/include/linux/cdrom.h
--- ../../linus/2.4/linux/include/linux/cdrom.h	Tue Aug  6 21:17:10 2002
+++ linux/include/linux/cdrom.h	Tue Aug  6 21:24:33 2002
@@ -494,6 +494,7 @@
 /* Mode page codes for mode sense/set */
 #define GPMODE_R_W_ERROR_PAGE		0x01
 #define GPMODE_WRITE_PARMS_PAGE		0x05
+#define GPMODE_WCACHING_PAGE		0x08
 #define GPMODE_AUDIO_CTL_PAGE		0x0e
 #define GPMODE_POWER_PAGE		0x1a
 #define GPMODE_FAULT_FAIL_PAGE		0x1c
@@ -504,6 +505,11 @@
  * of MODE_SENSE_POWER_PAGE */
 #define GPMODE_CDROM_PAGE		0x0d
 
+#define GPMODE_PAGE_CURRENT		0
+#define GPMODE_PAGE_CHANGE		1
+#define GPMODE_PAGE_DEFAULT		2
+#define GPMODE_PAGE_SAVE		3
+
 
 
 /* DVD struct types */
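
The new GPMODE_PAGE_* constants are the standard page-control values (current, changeable, default, saved) that select which copy of a mode page a MODE SENSE request returns. A sketch of how they would combine with a page code such as the new GPMODE_WCACHING_PAGE in byte 2 of a MODE SENSE(10) packet command (the helper is illustrative; GPCMD_MODE_SENSE_10 is the opcode already defined in cdrom.h):

static void example_mode_sense_cdb(unsigned char *cdb, int page, int page_control)
{
	memset(cdb, 0, 12);			/* 12-byte packet command */
	cdb[0] = GPCMD_MODE_SENSE_10;
	/* page control in bits 7-6, page code in bits 5-0 */
	cdb[2] = (page & 0x3f) | (page_control << 6);
	cdb[8] = 128;				/* allocation length, low byte */
}

For example, page = GPMODE_WCACHING_PAGE with page_control = GPMODE_PAGE_CURRENT would ask the drive for its currently active write-caching parameters.
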
diff -u -r -N ../../linus/2.4/linux/include/linux/fs.h linux/include/linux/fs.h
--- ../../linus/2.4/linux/include/linux/fs.h	Tue Aug  6 21:17:11 2002
+++ linux/include/linux/fs.h	Tue Aug  6 21:24:33 2002
@@ -894,6 +894,7 @@
 	int (*remount_fs) (struct super_block *, int *, char *);
 	void (*clear_inode) (struct inode *);
 	void (*umount_begin) (struct super_block *);
+	int (*relocate_blocks) (struct super_block *, unsigned long, unsigned long *);
 
 	/* Following are for knfsd to interact with "interesting" filesystems
 	 * Currently just reiserfs, but possibly FAT and others later
diff -u -r -N ../../linus/2.4/linux/include/linux/major.h linux/include/linux/major.h
--- ../../linus/2.4/linux/include/linux/major.h	Tue Aug  6 21:17:15 2002
+++ linux/include/linux/major.h	Tue Aug  6 21:24:45 2002
@@ -108,6 +108,8 @@
 #define SPECIALIX_NORMAL_MAJOR 75
 #define SPECIALIX_CALLOUT_MAJOR 76
 
+#define PACKET_MAJOR		97
+
 #define COMPAQ_CISS_MAJOR 	104
 #define COMPAQ_CISS_MAJOR1	105
 #define COMPAQ_CISS_MAJOR2      106
diff -u -r -N ../../linus/2.4/linux/include/linux/pktcdvd.h linux/include/linux/pktcdvd.h
--- ../../linus/2.4/linux/include/linux/pktcdvd.h	Thu Jan  1 01:00:00 1970
+++ linux/include/linux/pktcdvd.h	Wed Aug  7 21:55:25 2002
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices.
+ *
+ */
+#ifndef __PKTCDVD_H
+#define __PKTCDVD_H
+
+/*
+ * 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
+ */
+#define PACKET_DEBUG		1
+
+#define	MAX_WRITERS		8
+
+#define STACKED_BH_POOL_SIZE	64
+
+/*
+ * use drive write caching -- we need deferred error handling to be
+ * able to successfully recover with this option (drive will return good
+ * status as soon as the cdb is validated).
+ */
+#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
+#warning Enabling write caching, use at your own risk
+#define USE_WCACHING		1
+#else
+#define USE_WCACHING		0
+#endif
+
+/*
+ * No user-serviceable parts beyond this point ->
+ */
+
+#if PACKET_DEBUG
+#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+#if PACKET_DEBUG > 1
+#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
+#else
+#define VPRINTK(fmt, args...)
+#endif
+
+#define PKT_BUF_LIST		0x89
+
+/*
+ * device types
+ */
+#define PACKET_CDR		1
+#define	PACKET_CDRW		2
+#define PACKET_DVDR		3
+#define PACKET_DVDRW		4
+
+/*
+ * flags
+ */
+#define PACKET_WRITEABLE	1	/* pd is writeable */
+#define PACKET_NWA_VALID	2	/* next writeable address valid */
+#define PACKET_LRA_VALID	3	/* last recorded address valid */
+#define PACKET_READONLY		4	/* read only pd */
+#define PACKET_RECOVERY		5	/* rq recovery in progress */
+#define PACKET_RQ		6	/* current rq is set */
+#define PACKET_BUSY		7	/* current rq is being processed */
+
+/*
+ * Disc status -- from READ_DISC_INFO
+ */
+#define PACKET_DISC_EMPTY	0
+#define PACKET_DISC_INCOMPLETE	1
+#define PACKET_DISC_COMPLETE	2
+#define PACKET_DISC_OTHER	3
+
+/*
+ * write type, and corresponding data block type
+ */
+#define PACKET_MODE1		1
+#define PACKET_MODE2		2
+#define PACKET_BLOCK_MODE1	8
+#define PACKET_BLOCK_MODE2	10
+
+/*
+ * Last session/border status
+ */
+#define PACKET_SESSION_EMPTY		0
+#define PACKET_SESSION_INCOMPLETE	1
+#define PACKET_SESSION_RESERVED		2
+#define PACKET_SESSION_COMPLETE		3
+
+#define PACKET_MCN			"4a656e734178626f65323030300000"
+
+#undef PACKET_USE_LS
+
+/*
+ * special requests
+ */
+#define PKT_THROTTLE_SPEED	1
+
+#define PKT_TRAY_UNLOCK		0
+#define PKT_TRAY_LOCK		1
+
+/*
+ * Very crude stats for now
+ */
+struct packet_stats
+{
+	unsigned long		bh_s;
+	unsigned long		bh_e;
+	unsigned long		bh_cache_hits;
+	unsigned long		page_cache_hits;
+	unsigned long		secs_w;
+	unsigned long		secs_r;
+};
+
+/*
+ * packet ioctls
+ */
+#define PACKET_IOCTL_MAGIC	('X')
+#define PACKET_GET_STATS	_IOR(PACKET_IOCTL_MAGIC, 0, struct packet_stats)
+#define PACKET_SETUP_DEV	_IOW(PACKET_IOCTL_MAGIC, 1, unsigned int)
+#define PACKET_TEARDOWN_DEV	_IOW(PACKET_IOCTL_MAGIC, 2, unsigned int)
+
+#ifdef __KERNEL__
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+
+struct packet_settings
+{
+	__u8			size;		/* packet size in frames */
+	__u8			fp;		/* fixed packets */
+	__u8			link_loss;	/* the rest is specified
+						 * as per Mt Fuji */
+	__u8			write_type;
+	__u8			track_mode;
+	__u8			block_mode;
+};
+
+struct packet_cdrw
+{
+	struct buffer_head		*bhlist;	/* string of bhs */
+	atomic_t			free_bh;
+	merge_request_fn		*front_merge_fn;
+	merge_request_fn		*back_merge_fn;
+	merge_requests_fn		*merge_requests_fn;
+	request_queue_t			r_queue;
+	void				*queuedata;
+	pid_t				pid;
+	struct completion		thr_compl;
+};
+
+struct pktcdvd_device
+{
+	struct block_device		*bdev;
+	kdev_t				dev;		/* dev attached */
+	kdev_t				pkt_dev;	/* our dev */
+	char				name[20];
+	struct cdrom_device_info	*cdi;		/* cdrom matching dev */
+	struct packet_settings		settings;
+	struct packet_stats		stats;
+	atomic_t			refcnt;
+	__u8				speed;		/* cur write speed */
+	unsigned long			offset;		/* start offset */
+	__u8				mode_offset;	/* 0 / 8 */
+	__u8				type;
+	unsigned long			flags;
+	__u8				disc_status;
+	__u8				track_status;	/* last one */
+	__u32				nwa;	/* next writable address */
+	__u32				lra;	/* last recorded address */
+	spinlock_t			lock;
+	struct packet_cdrw		cdrw;
+	wait_queue_head_t		wqueue;
+	struct request			*rq;
+	atomic_t			wrqcnt;
+	struct buffer_head		*stacked_bhlist;
+	int				stacked_bhcnt;
+
+	struct semaphore		cache_sync_mutex;
+	int				unflushed_writes;
+
+	make_request_fn			*make_request_fn;
+};
+
+/*
+ * following possibly belongs in cdrom.h
+ */
+
+struct cdvd_capacity
+{
+	__u32 lba;
+	__u32 block_length;
+};
+
+void pkt_elevator_merge_req(struct request *rq, struct request *nxt) {}
+void pkt_elevator_cleanup(request_queue_t *q, struct request *rq, int count) {}
+
+#define ELEVATOR_PKTCDVD					\
+((elevator_t) {							\
+	0,				/* not used */		\
+	0,				/* not used */		\
+								\
+	pkt_elevator_merge,		/* elevator_merge_fn */ \
+	pkt_elevator_cleanup,					\
+	pkt_elevator_merge_req,					\
+	})
+
+#endif /* __KERNEL__ */
+
+#endif /* __PKTCDVD_H */
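
A userspace sketch of driving the three new ioctls end to end. The /dev/pktcdvd0 node name, the use of /dev/sr0 as the underlying writer, and passing the writer's device number to PACKET_SETUP_DEV by value are assumptions about intended usage; the patch itself only defines the ioctl numbers and struct packet_stats.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/ioctl.h>
#include <linux/pktcdvd.h>

int main(void)
{
	struct packet_stats ps;
	struct stat st;
	int fd;

	if (stat("/dev/sr0", &st) < 0)			/* assumed: the CD-RW writer */
		return 1;
	fd = open("/dev/pktcdvd0", O_RDONLY);		/* assumed node on PACKET_MAJOR */
	if (fd < 0)
		return 1;

	/* bind the packet device to the writer; the argument encoding is assumed */
	if (ioctl(fd, PACKET_SETUP_DEV, (unsigned int)st.st_rdev) < 0)
		return 1;

	if (ioctl(fd, PACKET_GET_STATS, &ps) == 0)
		printf("bh started %lu, ended %lu\n", ps.bh_s, ps.bh_e);

	ioctl(fd, PACKET_TEARDOWN_DEV, 0);
	close(fd);
	return 0;
}
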
diff -u -r -N ../../linus/2.4/linux/include/linux/udf_fs.h linux/include/linux/udf_fs.h
--- ../../linus/2.4/linux/include/linux/udf_fs.h	Tue Aug  6 21:17:20 2002
+++ linux/include/linux/udf_fs.h	Thu Aug  8 20:44:32 2002
@@ -30,7 +30,6 @@
  * HISTORY
  *
  */
-#include <linux/config.h>
 
 #ifndef _UDF_FS_H
 #define _UDF_FS_H 1
@@ -38,19 +37,9 @@
 #define UDF_PREALLOCATE
 #define UDF_DEFAULT_PREALLOC_BLOCKS	8
 
-#define UDFFS_DATE			"2002/03/11"
+#define UDFFS_DATE			"2002/03/14"
 #define UDFFS_VERSION			"0.9.6"
 
-#if !defined(UDFFS_RW)
-
-#if defined(CONFIG_UDF_RW)
-#define UDFFS_RW			1
-#else /* !defined(CONFIG_UDF_RW) */
-#define UDFFS_RW			0
-#endif /* defined(CONFIG_UDF_RW) */
-
-#endif /* !defined(UDFFS_RW) */
-
 #define UDFFS_DEBUG
 
 #ifdef UDFFS_DEBUG
@@ -67,4 +56,12 @@
 #define udf_info(f, a...) \
 		printk (KERN_INFO "UDF-fs INFO " f, ##a);
 
+#ifdef __KERNEL__
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif
+
+#endif /* __KERNEL__ */
+
 #endif /* _UDF_FS_H */
diff -u -r -N ../../linus/2.4/linux/include/linux/udf_fs_i.h linux/include/linux/udf_fs_i.h
--- ../../linus/2.4/linux/include/linux/udf_fs_i.h	Tue Aug  6 21:17:20 2002
+++ linux/include/linux/udf_fs_i.h	Thu Aug  8 20:44:32 2002
@@ -23,30 +23,49 @@
 #ifndef _ECMA_167_H
 typedef struct
 {
-	__u32 logicalBlockNum;
-	__u16 partitionReferenceNum;
+	__u32			logicalBlockNum;
+	__u16			partitionReferenceNum;
 } __attribute__ ((packed)) lb_addr;
+
+typedef struct
+{
+	__u32			extLength;
+	__u32			extPosition;
+} __attribute__ ((packed)) short_ad;
+
+typedef struct
+{
+	__u32			extLength;
+	lb_addr			extLocation;
+	__u8			impUse[6];
+} __attribute__ ((packed)) long_ad;
 #endif
 
 struct udf_inode_info
 {
-	long i_umtime;
-	long i_uctime;
-	long i_crtime;
-	long i_ucrtime;
+	long			i_umtime;
+	long			i_uctime;
+	long			i_crtime;
+	long			i_ucrtime;
 	/* Physical address of inode */
-	lb_addr i_location;
-	__u64 i_unique;
-	__u32 i_lenEAttr;
-	__u32 i_lenAlloc;
-	__u64 i_lenExtents;
-	__u32 i_next_alloc_block;
-	__u32 i_next_alloc_goal;
-	unsigned i_alloc_type : 3;
-	unsigned i_extended_fe : 1;
-	unsigned i_strat_4096 : 1;
-	unsigned i_new_inode : 1;
-	unsigned reserved : 26;
+	lb_addr			i_location;
+	__u64			i_unique;
+	__u32			i_lenEAttr;
+	__u32			i_lenAlloc;
+	__u64			i_lenExtents;
+	__u32			i_next_alloc_block;
+	__u32			i_next_alloc_goal;
+	unsigned		i_alloc_type : 3;
+	unsigned		i_efe : 1;
+	unsigned		i_use : 1;
+	unsigned		i_strat4096 : 1;
+	unsigned		reserved : 26;
+	union
+	{
+		short_ad	*i_sad;
+		long_ad		*i_lad;
+		__u8		*i_data;
+	} i_ext;
 };
 
 #endif