Fix lock-up issues in LZMA pcomp support in 2.6.30 (used in squashfs). Thanks to Jeff Hansen for the patch.

SVN-Revision: 16886
master
Felix Fietkau 16 years ago
parent 12b429b3a2
commit af6076273a
  1. 57
      target/linux/generic-2.6/patches-2.6.30/052-pcomp_lzma_support.patch

@ -1,6 +1,6 @@
--- /dev/null --- /dev/null
+++ b/crypto/unlzma.c +++ b/crypto/unlzma.c
@@ -0,0 +1,710 @@ @@ -0,0 +1,721 @@
+/* +/*
+ * LZMA uncompresion module for pcomp + * LZMA uncompresion module for pcomp
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
@ -69,6 +69,7 @@
+ u8 previous_byte; + u8 previous_byte;
+ ssize_t pos; + ssize_t pos;
+ struct unlzma_buffer *head; + struct unlzma_buffer *head;
+ int buf_full;
+ +
+ /* cstate */ + /* cstate */
+ int state; + int state;
@ -87,11 +88,32 @@
+} +}
+ +
+static void +static void
+get_buffer(struct unlzma_ctx *ctx)
+{
+ struct unlzma_buffer *bh;
+
+ bh = kzalloc(sizeof(struct unlzma_buffer), GFP_KERNEL);
+ bh->ptr = ctx->next_out;
+ bh->offset = ctx->pos;
+ bh->last = ctx->head;
+ bh->size = ctx->avail_out;
+ ctx->head = bh;
+ ctx->buf_full = 0;
+}
+
+static void
+unlzma_request_buffer(struct unlzma_ctx *ctx, int *avail) +unlzma_request_buffer(struct unlzma_ctx *ctx, int *avail)
+{ +{
+ mutex_unlock(&ctx->mutex); + do {
+ wait_event(ctx->next_req, unlzma_should_stop(ctx) || (*avail > 0)); + mutex_unlock(&ctx->mutex);
+ mutex_lock(&ctx->mutex); + if (wait_event_interruptible(ctx->next_req,
+ unlzma_should_stop(ctx) || (*avail > 0)))
+ schedule();
+ mutex_lock(&ctx->mutex);
+ } while (*avail <= 0 && !unlzma_should_stop(ctx));
+
+ if (!unlzma_should_stop(ctx) && ctx->buf_full)
+ get_buffer(ctx);
+} +}
+ +
+static u8 +static u8
@ -196,36 +218,20 @@
+ +
+ while (bh->offset > pos) { + while (bh->offset > pos) {
+ bh = bh->last; + bh = bh->last;
+ if (!bh) + BUG_ON(!bh);
+ return 0;
+ } + }
+ +
+ pos -= bh->offset; + pos -= bh->offset;
+ if (pos > bh->size) + BUG_ON(pos >= bh->size);
+ return 0;
+ +
+ return bh->ptr[pos]; + return bh->ptr[pos];
+} +}
+ +
+static void +static void
+get_buffer(struct unlzma_ctx *ctx)
+{
+ struct unlzma_buffer *bh;
+
+ bh = kzalloc(sizeof(struct unlzma_buffer), GFP_KERNEL);
+ bh->ptr = ctx->next_out;
+ bh->offset = ctx->pos;
+ bh->last = ctx->head;
+ bh->size = ctx->avail_out;
+ ctx->head = bh;
+}
+
+static void
+write_byte(struct unlzma_ctx *ctx, u8 byte) +write_byte(struct unlzma_ctx *ctx, u8 byte)
+{ +{
+ if (unlikely(ctx->avail_out <= 0)) { + if (unlikely(ctx->avail_out <= 0)) {
+ unlzma_request_buffer(ctx, &ctx->avail_out); + unlzma_request_buffer(ctx, &ctx->avail_out);
+ get_buffer(ctx);
+ } + }
+ +
+ if (!ctx->avail_out) + if (!ctx->avail_out)
@ -234,6 +240,8 @@
+ ctx->previous_byte = byte; + ctx->previous_byte = byte;
+ *(ctx->next_out++) = byte; + *(ctx->next_out++) = byte;
+ ctx->avail_out--; + ctx->avail_out--;
+ if (ctx->avail_out == 0)
+ ctx->buf_full = 1;
+ ctx->pos++; + ctx->pos++;
+} +}
+ +
@ -489,7 +497,8 @@
+ if (ctx->workspace_size < num_probs * sizeof(*p)) { + if (ctx->workspace_size < num_probs * sizeof(*p)) {
+ if (ctx->workspace) + if (ctx->workspace)
+ vfree(ctx->workspace); + vfree(ctx->workspace);
+ ctx->workspace = vmalloc(num_probs * sizeof(*p)); + ctx->workspace_size = num_probs * sizeof(*p);
+ ctx->workspace = vmalloc(ctx->workspace_size);
+ } + }
+ p = (u16 *) ctx->workspace; + p = (u16 *) ctx->workspace;
+ if (!p) + if (!p)
@ -652,6 +661,8 @@
+ req->avail_in = ctx->avail_in; + req->avail_in = ctx->avail_in;
+ req->next_out = ctx->next_out; + req->next_out = ctx->next_out;
+ req->avail_out = ctx->avail_out; + req->avail_out = ctx->avail_out;
+ ctx->next_in = 0;
+ ctx->avail_in = 0;
+ pos = ctx->pos - pos; + pos = ctx->pos - pos;
+ +
+out: +out:

Loading…
Cancel
Save