import org.apache.coyote.ActionCode;
import org.apache.coyote.OutputBuffer;
import org.apache.coyote.Response;
+import org.apache.coyote.http11.filters.GzipOutputFilter;
import org.apache.tomcat.util.buf.ByteChunk;
import org.apache.tomcat.util.buf.CharChunk;
import org.apache.tomcat.util.buf.MessageBytes;
*/
protected static final StringManager sm =
StringManager.getManager(Constants.Package);
+
+ /**
+ * Logger.
+ */
+ protected static org.apache.juli.logging.Log log
+ = org.apache.juli.logging.LogFactory.getLog(AbstractOutputBuffer.class);
// ------------------------------------------------------------- Properties
response.action(ActionCode.ACTION_COMMIT, null);
}
+
+ // go through the filters and if there is gzip filter
+ // invoke it to flush
+ for (int i = 0; i <= lastActiveFilter; i++) {
+ if (activeFilters[i] instanceof GzipOutputFilter) {
+ if (log.isDebugEnabled()) {
+ log.debug("Flushing the gzip filter at position " + i +
+ " of the filter chain...");
+ }
+ ((GzipOutputFilter) activeFilters[i]).flush();
+ break;
+ }
+ }
}
/**
--- /dev/null
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.coyote.http11.filters;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.zip.Deflater;
+import java.util.zip.GZIPOutputStream;
+
+/**
+ * Extension of {@link GZIPOutputStream} to workaround for a couple of long
+ * standing JDK bugs
+ * (<a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4255743">Bug
+ * 4255743</a> and
+ * <a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4813885">Bug
+ * 4813885</a>) so the GZIP'd output can be flushed.
+ */
public class FlushableGZIPOutputStream extends GZIPOutputStream {
    public FlushableGZIPOutputStream(OutputStream os) throws IOException {
        super(os);
    }

    // Zero-length input used in flush() to trick the Deflater into emitting
    // its buffered output (see the comment in flush()).
    private static final byte[] EMPTYBYTEARRAY = new byte[0];
    // Set once any payload bytes have been written. Guards flush() so that
    // the gzip header is never flushed to the wire on its own.
    private boolean hasData = false;

    /**
     * Here we make sure we have received data, so that the header has been for
     * sure written to the output stream already.
     */
    @Override
    public synchronized void write(byte[] bytes, int i, int i1)
            throws IOException {
        super.write(bytes, i, i1);
        hasData = true;
    }

    @Override
    public synchronized void write(int i) throws IOException {
        super.write(i);
        hasData = true;
    }

    @Override
    public synchronized void write(byte[] bytes) throws IOException {
        super.write(bytes);
        hasData = true;
    }

    /**
     * Flushes any data buffered by the underlying {@link Deflater} to the
     * wrapped output stream. A no-op until at least one byte of payload has
     * been written, so the gzip header cannot be flushed by itself.
     */
    @Override
    public synchronized void flush() throws IOException {
        if (!hasData) {
            return; // do not allow the gzip header to be flushed on its own
        }

        // trick the deflater to flush
        /**
         * Now this is tricky: We force the Deflater to flush its data by
         * switching compression level. As yet, a perplexingly simple workaround
         * for
         * http://developer.java.sun.com/developer/bugParade/bugs/4255743.html
         */
        if (!def.finished()) {
            def.setInput(EMPTYBYTEARRAY, 0, 0);

            // The level change only takes effect after new input is supplied,
            // hence the empty setInput() above. Each switch forces the
            // deflater to emit what it has buffered for the old level.
            def.setLevel(Deflater.NO_COMPRESSION);
            deflate();

            def.setLevel(Deflater.DEFAULT_COMPRESSION);
            deflate();

            // push the freshly deflated bytes through the wrapped stream
            out.flush();
        }

        hasData = false; // no more data to flush
    }

    /*
     * Keep on calling deflate until it runs dry. The default implementation
     * only does it once and can therefore hold onto data when they need to be
     * flushed out.
     */
    @Override
    protected void deflate() throws IOException {
        // def and buf are protected fields inherited from DeflaterOutputStream
        int len;
        do {
            len = def.deflate(buf, 0, buf.length);
            if (len > 0) {
                out.write(buf, 0, len);
            }
        } while (len != 0);
    }

}
protected static final ByteChunk ENCODING = new ByteChunk();
+ /**
+ * Logger.
+ */
+ protected static org.apache.juli.logging.Log log =
+ org.apache.juli.logging.LogFactory.getLog(GzipOutputFilter.class);
+
+
// ----------------------------------------------------- Static Initializer
public int doWrite(ByteChunk chunk, Response res)
throws IOException {
if (compressionStream == null) {
- compressionStream = new GZIPOutputStream(fakeOutputStream);
+ compressionStream = new FlushableGZIPOutputStream(fakeOutputStream);
}
compressionStream.write(chunk.getBytes(), chunk.getStart(),
chunk.getLength());
// --------------------------------------------------- OutputFilter Methods
+ /**
+ * Added to allow flushing to happen for the gzip'ed outputstream
+ */
+ public void flush() {
+ if (compressionStream != null) {
+ try {
+ if (log.isDebugEnabled()) {
+ log.debug("Flushing the compression stream!");
+ }
+ compressionStream.flush();
+ } catch (IOException e) {
+ if (log.isDebugEnabled()) {
+ log.debug("Ignored exception while flushing gzip filter", e);
+ }
+ }
+ }
+ }
/**
* Some filters need additional parameters from the response. All the
public long end()
throws IOException {
if (compressionStream == null) {
- compressionStream = new GZIPOutputStream(fakeOutputStream);
+ compressionStream = new FlushableGZIPOutputStream(fakeOutputStream);
}
compressionStream.finish();
compressionStream.close();
--- /dev/null
+package org.apache.coyote.http11;
+
+import java.io.ByteArrayOutputStream;
+import java.util.zip.GZIPOutputStream;
+
+import junit.framework.TestCase;
+
+import org.apache.coyote.Response;
+import org.apache.coyote.http11.filters.GzipOutputFilter;
+import org.apache.tomcat.util.buf.ByteChunk;
+
+/**
+ * Test case to demonstrate the interaction between gzip and flushing in the
+ * output filter.
+ */
+public class TestGzipOutputFilter extends TestCase {
+
+ /**
+ * Test the interaction betwen gzip and flushing. The idea is to: 1. create
+ * a internal output buffer, response, and attach an active gzipoutputfilter
+ * to the output buffer 2. set the output stream of the internal buffer to
+ * be a ByteArrayOutputStream so we can inspect the output bytes 3. write a
+ * chunk out using the gzipoutputfilter and invoke a flush on the
+ * InternalOutputBuffer 4. read from the ByteArrayOutputStream to find out
+ * what's being written out (flushed) 5. find out what's expected by wrting
+ * to GZIPOutputStream and close it (to force flushing) 6. Compare the size
+ * of the two arrays, they should be close (instead of one being much
+ * shorter than the other one)
+ *
+ * @throws Exception
+ */
+ public void testFlushingWithGzip() throws Exception {
+ // set up response, InternalOutputBuffer, and ByteArrayOutputStream
+ Response res = new Response();
+ InternalOutputBuffer iob = new InternalOutputBuffer(res);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ iob.setOutputStream(bos);
+ res.setOutputBuffer(iob);
+
+ // set up GzipOutputFilter to attach to the InternalOutputBuffer
+ GzipOutputFilter gf = new GzipOutputFilter();
+ iob.addFilter(gf);
+ iob.addActiveFilter(gf);
+
+ // write a chunk out
+ ByteChunk chunk = new ByteChunk(1024);
+ byte[] d = "Hello there tomcat developers, there is a bug in JDK".getBytes();
+ chunk.append(d, 0, d.length);
+ iob.doWrite(chunk, res);
+
+ // flush the InternalOutputBuffer
+ iob.flush();
+
+ // read from the ByteArrayOutputStream to find out what's being written
+ // out (flushed)
+ byte[] dataFound = bos.toByteArray();
+
+ // find out what's expected by wrting to GZIPOutputStream and close it
+ // (to force flushing)
+ ByteArrayOutputStream gbos = new ByteArrayOutputStream(1024);
+ GZIPOutputStream gos = new GZIPOutputStream(gbos);
+ gos.write(d);
+ gos.close();
+
+ // read the expected data
+ byte[] dataExpected = gbos.toByteArray();
+
+ // most of the data should have been flushed out
+ assertTrue(dataFound.length >= (dataExpected.length - 20));
+ }
+}
some debug logging to the jar scanner. (rjung)
</update>
<fix>
+ <bug>48738</bug>: Workaround a couple of long standing JDK bugs to
+ enable GZIP compressed output streams to be flushed. Based on a patch
+ provided by Jiong Wang. (markt)
+ </fix>
+ <fix>
<bug>49670</bug>: Restore SSO functionality that was broken by Lifecycle
refactoring. (markt)
</fix>