feat(jdk8): move files to new folder to avoid resources compiled.
This commit is contained in:
780
jdkSrc/jdk8/sun/net/www/http/ChunkedInputStream.java
Normal file
780
jdkSrc/jdk8/sun/net/www/http/ChunkedInputStream.java
Normal file
@@ -0,0 +1,780 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package sun.net.www.http;
|
||||
|
||||
import java.io.*;
|
||||
import java.util.*;
|
||||
|
||||
import sun.net.*;
|
||||
import sun.net.www.*;
|
||||
|
||||
/**
|
||||
* A <code>ChunkedInputStream</code> provides a stream for reading a body of
|
||||
* a http message that can be sent as a series of chunks, each with its own
|
||||
* size indicator. Optionally the last chunk can be followed by trailers
|
||||
* containing entity-header fields.
|
||||
* <p>
|
||||
* A <code>ChunkedInputStream</code> is also <code>Hurryable</code> so it
|
||||
* can be hurried to the end of the stream if the bytes are available on
|
||||
* the underlying stream.
|
||||
*/
|
||||
public
class ChunkedInputStream extends InputStream implements Hurryable {

    /**
     * The underlying stream
     */
    private InputStream in;

    /**
     * The <code>HttpClient</code> that should be notified when the chunked stream has
     * completed.
     */
    private HttpClient hc;

    /**
     * The <code>MessageHeader</code> that is populated with any optional trailer
     * that appear after the last chunk.
     */
    private MessageHeader responses;

    /**
     * The size, in bytes, of the chunk that is currently being read.
     * This size is only valid if the current position in the underlying
     * input stream is inside a chunk (ie: state == STATE_READING_CHUNK).
     */
    private int chunkSize;

    /**
     * The number of bytes read from the underlying stream for the current
     * chunk. This value is always in the range <code>0</code> through to
     * <code>chunkSize</code>
     */
    private int chunkRead;

    /**
     * The internal buffer array where chunk data is available for the
     * application to read. Grown on demand by processRaw() when a chunk
     * does not fit.
     */
    private byte chunkData[] = new byte[4096];

    /**
     * The current position in the buffer. It contains the index
     * of the next byte to read from <code>chunkData</code>
     */
    private int chunkPos;

    /**
     * The index one greater than the index of the last valid byte in the
     * buffer. This value is always in the range <code>0</code> through
     * <code>chunkData.length</code>.
     */
    private int chunkCount;

    /**
     * The internal buffer where bytes from the underlying stream can be
     * read. It may contain bytes representing chunk-size, chunk-data, or
     * trailer fields. Grown on demand by ensureRawAvailable().
     */
    private byte rawData[] = new byte[32];

    /**
     * The current position in the buffer. It contains the index
     * of the next byte to read from <code>rawData</code>
     */
    private int rawPos;

    /**
     * The index one greater than the index of the last valid byte in the
     * buffer. This value is always in the range <code>0</code> through
     * <code>rawData.length</code>.
     */
    private int rawCount;

    /**
     * Indicates if an error was encountered when processing the chunked
     * stream. Once set it is never cleared; it prevents the connection
     * from being returned to the keep-alive cache.
     */
    private boolean error;

    /**
     * Indicates if the chunked stream has been closed using the
     * <code>close</code> method.
     */
    private boolean closed;

    /*
     * Maximum chunk header size of 2KB + 2 bytes for CRLF
     */
    private final static int MAX_CHUNK_HEADER_SIZE = 2050;

    /**
     * State to indicate that next field should be :-
     *  chunk-size [ chunk-extension ] CRLF
     */
    static final int STATE_AWAITING_CHUNK_HEADER    = 1;

    /**
     * State to indicate that we are currently reading the chunk-data.
     */
    static final int STATE_READING_CHUNK            = 2;

    /**
     * Indicates that a chunk has been completely read and the next
     * fields to be examine should be CRLF
     */
    static final int STATE_AWAITING_CHUNK_EOL       = 3;

    /**
     * Indicates that all chunks have been read and the next field
     * should be optional trailers or an indication that the chunked
     * stream is complete.
     */
    static final int STATE_AWAITING_TRAILERS        = 4;

    /**
     * State to indicate that the chunked stream is complete and
     * no further bytes should be read from the underlying stream.
     */
    static final int STATE_DONE                     = 5;

    /**
     * Indicates the current state (one of the STATE_* constants above).
     */
    private int state;


    /**
     * Check to make sure that this stream has not been closed.
     *
     * @throws IOException if close() has already been called
     */
    private void ensureOpen() throws IOException {
        if (closed) {
            throw new IOException("stream is closed");
        }
    }


    /**
     * Ensures there is <code>size</code> bytes available in
     * <code>rawData</code>. This requires that we either
     * shift the bytes in use to the begining of the buffer
     * or allocate a large buffer with sufficient space available.
     */
    private void ensureRawAvailable(int size) {
        if (rawCount + size > rawData.length) {
            int used = rawCount - rawPos;
            if (used + size > rawData.length) {
                // not enough room even after compacting - grow the buffer
                byte tmp[] = new byte[used + size];
                if (used > 0) {
                    System.arraycopy(rawData, rawPos, tmp, 0, used);
                }
                rawData = tmp;
            } else {
                // compact in place: shift the unread bytes to the front
                if (used > 0) {
                    System.arraycopy(rawData, rawPos, rawData, 0, used);
                }
            }
            rawCount = used;
            rawPos = 0;
        }
    }


    /**
     * Close the underlying input stream by either returning it to the
     * keep alive cache or closing the stream.
     * <p>
     * As a chunked stream is inheritly persistent (see HTTP 1.1 RFC) the
     * underlying stream can be returned to the keep alive cache if the
     * stream can be completely read without error.
     */
    private void closeUnderlying() throws IOException {
        if (in == null) {
            return;
        }

        if (!error && state == STATE_DONE) {
            // stream fully consumed without error - connection is reusable
            hc.finished();
        } else {
            // try to drain to the end without blocking; otherwise the
            // connection cannot be reused and must be closed
            if (!hurry()) {
                hc.closeServer();
            }
        }

        in = null;
    }

    /**
     * Attempt to read the remainder of a chunk directly into the
     * caller's buffer.
     * <p>
     * Return the number of bytes read.
     */
    private int fastRead(byte[] b, int off, int len) throws IOException {

        // assert state == STATE_READING_CHUNKS;

        int remaining = chunkSize - chunkRead;
        int cnt = (remaining < len) ? remaining : len;
        if (cnt > 0) {
            int nread;
            try {
                nread = in.read(b, off, cnt);
            } catch (IOException e) {
                error = true;
                throw e;
            }
            if (nread > 0) {
                chunkRead += nread;
                if (chunkRead >= chunkSize) {
                    state = STATE_AWAITING_CHUNK_EOL;
                }
                return nread;
            }
            // EOF in mid-chunk: the server closed before the chunk ended
            error = true;
            throw new IOException("Premature EOF");
        } else {
            return 0;
        }
    }

    /**
     * Process any outstanding bytes that have already been read into
     * <code>rawData</code>.
     * <p>
     * The parsing of the chunked stream is performed as a state machine with
     * <code>state</code> representing the current state of the processing.
     * <p>
     * Returns when either all the outstanding bytes in rawData have been
     * processed or there is insufficient bytes available to continue
     * processing. When the latter occurs <code>rawPos</code> will not have
     * been updated and thus the processing can be restarted once further
     * bytes have been read into <code>rawData</code>.
     */
    private void processRaw() throws IOException {
        int pos;
        int i;

        while (state != STATE_DONE) {

            switch (state) {

                /**
                 * We are awaiting a line with a chunk header
                 */
                case STATE_AWAITING_CHUNK_HEADER:
                    /*
                     * Find \n to indicate end of chunk header. If not found when there is
                     * insufficient bytes in the raw buffer to parse a chunk header.
                     */
                    pos = rawPos;
                    while (pos < rawCount) {
                        if (rawData[pos] == '\n') {
                            break;
                        }
                        pos++;
                        if ((pos - rawPos) >= MAX_CHUNK_HEADER_SIZE) {
                            error = true;
                            throw new IOException("Chunk header too long");
                        }
                    }
                    if (pos >= rawCount) {
                        return;
                    }

                    /*
                     * Extract the chunk size from the header (ignoring extensions).
                     */
                    String header = new String(rawData, rawPos, pos-rawPos+1, "US-ASCII");
                    for (i=0; i < header.length(); i++) {
                        if (Character.digit(header.charAt(i), 16) == -1)
                            break;
                    }
                    try {
                        // substring is empty when the first char is not a hex
                        // digit, which also yields the "Bogus chunk size" error
                        chunkSize = Integer.parseInt(header.substring(0, i), 16);
                    } catch (NumberFormatException e) {
                        error = true;
                        throw new IOException("Bogus chunk size");
                    }

                    /*
                     * Chunk has been parsed so move rawPos to first byte of chunk
                     * data.
                     */
                    rawPos = pos + 1;
                    chunkRead = 0;

                    /*
                     * A chunk size of 0 means EOF.
                     */
                    if (chunkSize > 0) {
                        state = STATE_READING_CHUNK;
                    } else {
                        state = STATE_AWAITING_TRAILERS;
                    }
                    break;


                /**
                 * We are awaiting raw entity data (some may have already been
                 * read). chunkSize is the size of the chunk; chunkRead is the
                 * total read from the underlying stream to date.
                 */
                case STATE_READING_CHUNK :
                    /* no data available yet */
                    if (rawPos >= rawCount) {
                        return;
                    }

                    /*
                     * Compute the number of bytes of chunk data available in the
                     * raw buffer.
                     */
                    int copyLen = Math.min( chunkSize-chunkRead, rawCount-rawPos );

                    /*
                     * Expand or compact chunkData if needed.
                     */
                    if (chunkData.length < chunkCount + copyLen) {
                        int cnt = chunkCount - chunkPos;
                        if (chunkData.length < cnt + copyLen) {
                            byte tmp[] = new byte[cnt + copyLen];
                            System.arraycopy(chunkData, chunkPos, tmp, 0, cnt);
                            chunkData = tmp;
                        } else {
                            System.arraycopy(chunkData, chunkPos, chunkData, 0, cnt);
                        }
                        chunkPos = 0;
                        chunkCount = cnt;
                    }

                    /*
                     * Copy the chunk data into chunkData so that it's available
                     * to the read methods.
                     */
                    System.arraycopy(rawData, rawPos, chunkData, chunkCount, copyLen);
                    rawPos += copyLen;
                    chunkCount += copyLen;
                    chunkRead += copyLen;

                    /*
                     * If all the chunk has been copied into chunkData then the next
                     * token should be CRLF.
                     */
                    if (chunkSize - chunkRead <= 0) {
                        state = STATE_AWAITING_CHUNK_EOL;
                    } else {
                        return;
                    }
                    break;


                /**
                 * Awaiting CRLF after the chunk
                 */
                case STATE_AWAITING_CHUNK_EOL:
                    /* not available yet */
                    if (rawPos + 1 >= rawCount) {
                        return;
                    }

                    if (rawData[rawPos] != '\r') {
                        error = true;
                        throw new IOException("missing CR");
                    }
                    if (rawData[rawPos+1] != '\n') {
                        error = true;
                        throw new IOException("missing LF");
                    }
                    rawPos += 2;

                    /*
                     * Move onto the next chunk
                     */
                    state = STATE_AWAITING_CHUNK_HEADER;
                    break;


                /**
                 * Last chunk has been read so not we're waiting for optional
                 * trailers.
                 */
                case STATE_AWAITING_TRAILERS:

                    /*
                     * Do we have an entire line in the raw buffer?
                     */
                    pos = rawPos;
                    while (pos < rawCount) {
                        if (rawData[pos] == '\n') {
                            break;
                        }
                        pos++;
                    }
                    if (pos >= rawCount) {
                        return;
                    }

                    if (pos == rawPos) {
                        error = true;
                        throw new IOException("LF should be proceeded by CR");
                    }
                    if (rawData[pos-1] != '\r') {
                        error = true;
                        throw new IOException("LF should be proceeded by CR");
                    }

                    /*
                     * Stream done so close underlying stream.
                     * (a bare CRLF line terminates the trailer section)
                     */
                    if (pos == (rawPos + 1)) {

                        state = STATE_DONE;
                        closeUnderlying();

                        return;
                    }

                    /*
                     * Extract any tailers and append them to the message
                     * headers.
                     */
                    String trailer = new String(rawData, rawPos, pos-rawPos, "US-ASCII");
                    i = trailer.indexOf(':');
                    if (i == -1) {
                        throw new IOException("Malformed tailer - format should be key:value");
                    }
                    String key = (trailer.substring(0, i)).trim();
                    String value = (trailer.substring(i+1, trailer.length())).trim();

                    responses.add(key, value);

                    /*
                     * Move onto the next trailer.
                     */
                    rawPos = pos+1;
                    break;

            } /* switch */
        }
    }


    /**
     * Reads any available bytes from the underlying stream into
     * <code>rawData</code> and returns the number of bytes of
     * chunk data available in <code>chunkData</code> that the
     * application can read. Never blocks.
     */
    private int readAheadNonBlocking() throws IOException {

        /*
         * If there's anything available on the underlying stream then we read
         * it into the raw buffer and process it. Processing ensures that any
         * available chunk data is made available in chunkData.
         */
        int avail = in.available();
        if (avail > 0) {

            /* ensure that there is space in rawData to read the available */
            ensureRawAvailable(avail);

            int nread;
            try {
                nread = in.read(rawData, rawCount, avail);
            } catch (IOException e) {
                error = true;
                throw e;
            }
            if (nread < 0) {
                error = true;   /* premature EOF ? */
                return -1;
            }
            rawCount += nread;

            /*
             * Process the raw bytes that have been read.
             */
            processRaw();
        }

        /*
         * Return the number of chunked bytes available to read
         */
        return chunkCount - chunkPos;
    }

    /**
     * Reads from the underlying stream until there is chunk data
     * available in <code>chunkData</code> for the application to
     * read. May block.
     */
    private int readAheadBlocking() throws IOException {

        do {
            /*
             * All of chunked response has been read to return EOF.
             */
            if (state == STATE_DONE) {
                return -1;
            }

            /*
             * We must read into the raw buffer so make sure there is space
             * available. We use a size of 32 to avoid too much chunk data
             * being read into the raw buffer.
             */
            ensureRawAvailable(32);
            int nread;
            try {
                nread = in.read(rawData, rawCount, rawData.length-rawCount);
            } catch (IOException e) {
                error = true;
                throw e;
            }

            /**
             * If we hit EOF it means there's a problem as we should never
             * attempt to read once the last chunk and trailers have been
             * received.
             */
            if (nread < 0) {
                error = true;
                throw new IOException("Premature EOF");
            }

            /**
             * Process the bytes from the underlying stream
             */
            rawCount += nread;
            processRaw();

        } while (chunkCount <= 0);

        /*
         * Return the number of chunked bytes available to read
         */
        return chunkCount - chunkPos;
    }

    /**
     * Read ahead in either blocking or non-blocking mode. This method
     * is typically used when we run out of available bytes in
     * <code>chunkData</code> or we need to determine how many bytes
     * are available on the input stream.
     */
    private int readAhead(boolean allowBlocking) throws IOException {

        /*
         * Last chunk already received - return EOF
         */
        if (state == STATE_DONE) {
            return -1;
        }

        /*
         * Reset position/count if data in chunkData is exhausted.
         */
        if (chunkPos >= chunkCount) {
            chunkCount = 0;
            chunkPos = 0;
        }

        /*
         * Read ahead blocking or non-blocking
         */
        if (allowBlocking) {
            return readAheadBlocking();
        } else {
            return readAheadNonBlocking();
        }
    }

    /**
     * Creates a <code>ChunkedInputStream</code> and saves its  arguments, for
     * later use.
     *
     * @param in the underlying input stream.
     * @param hc the HttpClient
     * @param responses the MessageHeader that should be populated with optional
     *                  trailers.
     */
    public ChunkedInputStream(InputStream in, HttpClient hc, MessageHeader responses) throws IOException {

        /* save arguments */
        this.in = in;
        this.responses = responses;
        this.hc = hc;

        /*
         * Set our initial state to indicate that we are first starting to
         * look for a chunk header.
         */
        state = STATE_AWAITING_CHUNK_HEADER;
    }

    /**
     * See
     * the general contract of the <code>read</code>
     * method of <code>InputStream</code>.
     *
     * @return     the next byte of data, or <code>-1</code> if the end of the
     *             stream is reached.
     * @exception  IOException  if an I/O error occurs.
     * @see        java.io.FilterInputStream#in
     */
    public synchronized int read() throws IOException {
        ensureOpen();
        if (chunkPos >= chunkCount) {
            if (readAhead(true) <= 0) {
                return -1;
            }
        }
        // mask to return an unsigned value in 0..255
        return chunkData[chunkPos++] & 0xff;
    }


    /**
     * Reads bytes from this stream into the specified byte array, starting at
     * the given offset.
     *
     * @param      b     destination buffer.
     * @param      off   offset at which to start storing bytes.
     * @param      len   maximum number of bytes to read.
     * @return     the number of bytes read, or <code>-1</code> if the end of
     *             the stream has been reached.
     * @exception  IOException  if an I/O error occurs.
     */
    public synchronized int read(byte b[], int off, int len)
        throws IOException
    {
        ensureOpen();
        if ((off < 0) || (off > b.length) || (len < 0) ||
            ((off + len) > b.length) || ((off + len) < 0)) {
            throw new IndexOutOfBoundsException();
        } else if (len == 0) {
            return 0;
        }

        int avail = chunkCount - chunkPos;
        if (avail <= 0) {
            /*
             * Optimization: if we're in the middle of the chunk read
             * directly from the underlying stream into the caller's
             * buffer
             */
            if (state == STATE_READING_CHUNK) {
                return fastRead( b, off, len );
            }

            /*
             * We're not in the middle of a chunk so we must read ahead
             * until there is some chunk data available.
             */
            avail = readAhead(true);
            if (avail < 0) {
                return -1;      /* EOF */
            }
        }
        int cnt = (avail < len) ? avail : len;
        System.arraycopy(chunkData, chunkPos, b, off, cnt);
        chunkPos += cnt;

        return cnt;
    }

    /**
     * Returns the number of bytes that can be read from this input
     * stream without blocking.
     *
     * @return     the number of bytes that can be read from this input
     *             stream without blocking.
     * @exception  IOException  if an I/O error occurs.
     * @see        java.io.FilterInputStream#in
     */
    public synchronized int available() throws IOException {
        ensureOpen();

        int avail = chunkCount - chunkPos;
        if(avail > 0) {
            return avail;
        }

        avail = readAhead(false);

        if (avail < 0) {
            // EOF maps to "0 bytes available" per the available() contract
            return 0;
        } else {
            return avail;
        }
    }

    /**
     * Close the stream by either returning the connection to the
     * keep alive cache or closing the underlying stream.
     * <p>
     * If the chunked response hasn't been completely read we
     * try to "hurry" to the end of the response. If this is
     * possible (without blocking) then the connection can be
     * returned to the keep alive cache.
     *
     * @exception  IOException  if an I/O error occurs.
     */
    public synchronized void close() throws IOException {
        if (closed) {
            return;
        }
        closeUnderlying();
        closed = true;
    }

    /**
     * Hurry the input stream by reading everything from the underlying
     * stream. If the last chunk (and optional trailers) can be read without
     * blocking then the stream is considered hurried.
     * <p>
     * Note that if an error has occurred or we can't get to last chunk
     * without blocking then this stream can't be hurried and should be
     * closed.
     */
    public synchronized boolean hurry() {
        if (in == null || error) {
            return false;
        }

        try {
            readAhead(false);
        } catch (Exception e) {
            // best-effort drain: any failure simply means we can't hurry
            return false;
        }

        if (error) {
            return false;
        }

        return (state == STATE_DONE);
    }

}
|
||||
300
jdkSrc/jdk8/sun/net/www/http/ChunkedOutputStream.java
Normal file
300
jdkSrc/jdk8/sun/net/www/http/ChunkedOutputStream.java
Normal file
@@ -0,0 +1,300 @@
|
||||
/*
|
||||
* Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package sun.net.www.http;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
/**
|
||||
* OutputStream that sends the output to the underlying stream using chunked
|
||||
* encoding as specified in RFC 2068.
|
||||
*/
|
||||
public class ChunkedOutputStream extends PrintStream {
|
||||
|
||||
/* Default chunk size (including chunk header) if not specified */
|
||||
static final int DEFAULT_CHUNK_SIZE = 4096;
|
||||
private static final byte[] CRLF = {'\r', '\n'};
|
||||
private static final int CRLF_SIZE = CRLF.length;
|
||||
private static final byte[] FOOTER = CRLF;
|
||||
private static final int FOOTER_SIZE = CRLF_SIZE;
|
||||
private static final byte[] EMPTY_CHUNK_HEADER = getHeader(0);
|
||||
private static final int EMPTY_CHUNK_HEADER_SIZE = getHeaderSize(0);
|
||||
|
||||
/* internal buffer */
|
||||
private byte buf[];
|
||||
/* size of data (excluding footers and headers) already stored in buf */
|
||||
private int size;
|
||||
/* current index in buf (i.e. buf[count] */
|
||||
private int count;
|
||||
/* number of bytes to be filled up to complete a data chunk
|
||||
* currently being built */
|
||||
private int spaceInCurrentChunk;
|
||||
|
||||
/* underlying stream */
|
||||
private PrintStream out;
|
||||
|
||||
/* the chunk size we use */
|
||||
private int preferredChunkDataSize;
|
||||
private int preferedHeaderSize;
|
||||
private int preferredChunkGrossSize;
|
||||
/* header for a complete Chunk */
|
||||
private byte[] completeHeader;
|
||||
|
||||
/* return the size of the header for a particular chunk size */
|
||||
private static int getHeaderSize(int size) {
|
||||
return (Integer.toHexString(size)).length() + CRLF_SIZE;
|
||||
}
|
||||
|
||||
/* return a header for a particular chunk size */
|
||||
private static byte[] getHeader(int size){
|
||||
try {
|
||||
String hexStr = Integer.toHexString(size);
|
||||
byte[] hexBytes = hexStr.getBytes("US-ASCII");
|
||||
byte[] header = new byte[getHeaderSize(size)];
|
||||
for (int i=0; i<hexBytes.length; i++)
|
||||
header[i] = hexBytes[i];
|
||||
header[hexBytes.length] = CRLF[0];
|
||||
header[hexBytes.length+1] = CRLF[1];
|
||||
return header;
|
||||
} catch (java.io.UnsupportedEncodingException e) {
|
||||
/* This should never happen */
|
||||
throw new InternalError(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Creates a chunked output stream over {@code o} using the default
 * chunk size ({@code DEFAULT_CHUNK_SIZE}).
 *
 * @param o the underlying stream the encoded chunks are written to
 */
public ChunkedOutputStream(PrintStream o) {
    this(o, DEFAULT_CHUNK_SIZE);
}
|
||||
|
||||
/**
 * Creates a chunked output stream over {@code o} where {@code size} is the
 * preferred gross size of each chunk (header + data + footer). Non-positive
 * sizes fall back to {@code DEFAULT_CHUNK_SIZE}.
 *
 * @param o    the underlying stream the encoded chunks are written to
 * @param size preferred chunk size including header and footer overhead
 */
public ChunkedOutputStream(PrintStream o, int size) {
    super(o);
    out = o;

    if (size <= 0) {
        size = DEFAULT_CHUNK_SIZE;
    }

    /* Adjust the size to cater for the chunk header - eg: if the
     * preferred chunk size is 1k this means the chunk size should
     * be 1017 bytes (differs by 7 from preferred size because of
     * 3 bytes for chunk size in hex and CRLF (header) and CRLF (footer)).
     *
     * If headerSize(adjusted_size) is shorter then headerSize(size)
     * then try to use the extra byte unless headerSize(adjusted_size+1)
     * increases back to headerSize(size)
     */
    if (size > 0) {
        int adjusted_size = size - getHeaderSize(size) - FOOTER_SIZE;
        if (getHeaderSize(adjusted_size+1) < getHeaderSize(size)){
            adjusted_size++;
        }
        size = adjusted_size;
    }

    // adjusted size can go non-positive for tiny requested sizes; fall
    // back to the default data size in that case
    if (size > 0) {
        preferredChunkDataSize = size;
    } else {
        preferredChunkDataSize = DEFAULT_CHUNK_SIZE -
                getHeaderSize(DEFAULT_CHUNK_SIZE) - FOOTER_SIZE;
    }

    preferedHeaderSize = getHeaderSize(preferredChunkDataSize);
    preferredChunkGrossSize = preferedHeaderSize + preferredChunkDataSize
            + FOOTER_SIZE;
    completeHeader = getHeader(preferredChunkDataSize);

    /* start with an initial buffer */
    buf = new byte[preferredChunkGrossSize];
    reset();
}
|
||||
|
||||
/*
 * Flush a buffered, completed chunk to an underlying stream. If the data in
 * the buffer is insufficient to build up a chunk of "preferredChunkSize"
 * then the data do not get flushed unless flushAll is true. If flushAll is
 * true then the remaining data builds up a last chunk which size is smaller
 * than preferredChunkSize, and then the last chunk gets flushed to
 * underlying stream. If flushAll is true and there is no data in a buffer
 * at all then an empty chunk (containing a header only) gets flushed to
 * underlying stream.
 */
private void flush(boolean flushAll) {
    if (spaceInCurrentChunk == 0) {
        /* flush a completed chunk to underlying stream */
        out.write(buf, 0, preferredChunkGrossSize);
        out.flush();
        reset();
    } else if (flushAll){
        /* complete the last chunk and flush it to underlying stream */
        if (size > 0){
            /* adjust a header start index in case the header of the last
             * chunk is shorter then preferedHeaderSize */

            int adjustedHeaderStartIndex = preferedHeaderSize -
                    getHeaderSize(size);

            /* write header (right-aligned so it abuts the data) */
            System.arraycopy(getHeader(size), 0, buf,
                    adjustedHeaderStartIndex, getHeaderSize(size));

            /* write footer */
            buf[count++] = FOOTER[0];
            buf[count++] = FOOTER[1];

            //send the last chunk to underlying stream
            out.write(buf, adjustedHeaderStartIndex, count - adjustedHeaderStartIndex);
        } else {
            //send an empty chunk (containing just a header) to underlying stream
            out.write(EMPTY_CHUNK_HEADER, 0, EMPTY_CHUNK_HEADER_SIZE);
        }

        out.flush();
        reset();
    }
}
|
||||
|
||||
/**
 * Delegates the error-state check to the underlying stream.
 *
 * @return {@code true} if the underlying stream has encountered an error
 */
@Override
public boolean checkError() {
    return out.checkError();
}
|
||||
|
||||
/* Check that the output stream is still open */
|
||||
private void ensureOpen() {
|
||||
if (out == null)
|
||||
setError();
|
||||
}
|
||||
|
||||
/*
 * Writes data from b[] to an internal buffer and stores the data as data
 * chunks of a following format: {Data length in Hex}{CRLF}{data}{CRLF}
 * The size of the data is preferredChunkSize. As soon as a completed chunk
 * is read from b[] a process of reading from b[] suspends, the chunk gets
 * flushed to the underlying stream and then the reading process from b[]
 * continues. When there is no more sufficient data in b[] to build up a
 * chunk of preferredChunkSize size the data get stored as an incomplete
 * chunk of a following format: {space for data length}{CRLF}{data}
 * The size of the data is of course smaller than preferredChunkSize.
 */
@Override
public synchronized void write(byte b[], int off, int len) {
    ensureOpen();
    // standard InputStream/OutputStream range check, including the
    // (off + len) < 0 overflow guard
    if ((off < 0) || (off > b.length) || (len < 0) ||
        ((off + len) > b.length) || ((off + len) < 0)) {
        throw new IndexOutOfBoundsException();
    } else if (len == 0) {
        return;
    }

    /* if b[] contains enough data then one loop cycle creates one complete
     * data chunk with a header, body and a footer, and then flushes the
     * chunk to the underlying stream. Otherwise, the last loop cycle
     * creates incomplete data chunk with empty header and with no footer
     * and stores this incomplete chunk in an internal buffer buf[]
     */
    int bytesToWrite = len;
    int inputIndex = off;  /* the index of the byte[] currently being written */

    do {
        /* enough data to complete a chunk */
        if (bytesToWrite >= spaceInCurrentChunk) {

            /* header */
            for (int i=0; i<completeHeader.length; i++)
                buf[i] = completeHeader[i];

            /* data */
            System.arraycopy(b, inputIndex, buf, count, spaceInCurrentChunk);
            inputIndex += spaceInCurrentChunk;
            bytesToWrite -= spaceInCurrentChunk;
            count += spaceInCurrentChunk;

            /* footer */
            buf[count++] = FOOTER[0];
            buf[count++] = FOOTER[1];
            spaceInCurrentChunk = 0;  //chunk is complete

            flush(false);
            // stop early if the underlying stream reported an error
            if (checkError()){
                break;
            }
        }

        /* not enough data to build a chunk */
        else {
            /* header */
            /* do not write header if not enough bytes to build a chunk yet */

            /* data */
            System.arraycopy(b, inputIndex, buf, count, bytesToWrite);
            count += bytesToWrite;
            size += bytesToWrite;
            spaceInCurrentChunk -= bytesToWrite;
            bytesToWrite = 0;

            /* footer */
            /* do not write header if not enough bytes to build a chunk yet */
        }
    } while (bytesToWrite > 0);
}
|
||||
|
||||
@Override
|
||||
public synchronized void write(int _b) {
|
||||
byte b[] = {(byte)_b};
|
||||
write(b, 0, 1);
|
||||
}
|
||||
|
||||
    /**
     * Resets the internal buffer to its empty state: the write cursor
     * points just past the reserved chunk-header area, no payload bytes
     * are buffered, and a full chunk's worth of space is available again.
     */
    public synchronized void reset() {
        count = preferedHeaderSize;
        size = 0;
        spaceInCurrentChunk = preferredChunkDataSize;
    }
|
||||
|
||||
    /**
     * @return the number of payload bytes currently buffered in the
     *         not-yet-flushed incomplete chunk (chunk headers and footers
     *         excluded; completed, flushed chunks are not counted)
     */
    public int size() {
        return size;
    }
|
||||
|
||||
    /**
     * Terminates the chunked body.  Any buffered payload is first flushed
     * as a final data chunk, then a zero-length chunk — the chunked-encoding
     * end-of-body marker — is emitted (the second flush(true) runs with
     * size == 0 after the preceding flush has reset the buffer).
     * The underlying stream is intentionally left open, since it may belong
     * to a reusable keep-alive connection; this stream is marked closed by
     * dropping its reference to out.
     */
    @Override
    public synchronized void close() {
        ensureOpen();

        /* if we have buffered data, send it as a (final) chunk */
        if (size > 0) {
            flush(true);
        }

        /* send a zero length chunk */
        flush(true);

        /* don't close the underlying stream */
        out = null;
    }
|
||||
|
||||
    /**
     * Flushes any buffered payload to the underlying stream immediately,
     * even though it does not fill a complete chunk (flush(true) presumably
     * forces out the incomplete chunk — confirm against flush(boolean),
     * defined earlier in this class).  A no-op when nothing is buffered.
     */
    @Override
    public synchronized void flush() {
        ensureOpen();
        if (size > 0) {
            flush(true);
        }
    }
|
||||
}
|
||||
171
jdkSrc/jdk8/sun/net/www/http/HttpCapture.java
Normal file
171
jdkSrc/jdk8/sun/net/www/http/HttpCapture.java
Normal file
@@ -0,0 +1,171 @@
|
||||
/*
|
||||
* Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
package sun.net.www.http;
|
||||
|
||||
import java.io.*;
|
||||
import java.util.ArrayList;
|
||||
import java.util.regex.*;
|
||||
import sun.net.NetProperties;
|
||||
import sun.util.logging.PlatformLogger;
|
||||
|
||||
/**
 * Main class of the HTTP traffic capture tool.
 * Captures are triggered by the sun.net.http.captureRules system property.
 * If set, it should point to a file containing the capture rules.
 * Format for the file is simple:
 * - 1 rule per line
 * - Lines starting with a # are considered comments and ignored
 * - a rule is a pair of a regular expression and file pattern, separated by a comma
 * - The regular expression is applied to URLs, if it matches, the traffic for
 *   that URL will be captured in the associated file.
 * - if the file name contains a '%d', then that sequence will be replaced by a
 *   unique random number for each URL. This allow for multi-threaded captures
 *   of URLs matching the same pattern.
 * - Rules are checked in sequence, in the same order as in the file, until a
 *   match is found or the end of the list is reached.
 *
 * Examples of rules:
 * www\.sun\.com , sun%d.log
 * yahoo\.com\/.*asf , yahoo.log
 *
 * @author jccollet
 */
public class HttpCapture {
    private File file = null;
    // direction of the last byte logged; used to emit a marker line
    // whenever the traffic direction flips between sent and received
    private boolean incoming = true;
    // NOTE(review): remains null if the FileWriter in the constructor
    // failed; sent()/received()/flush() would then NPE — confirm callers.
    private BufferedWriter out = null;
    private static boolean initialized = false;
    // Parallel lists: patterns.get(i) is the URL regex whose capture file
    // pattern is capFiles.get(i).
    private static volatile ArrayList<Pattern> patterns = null;
    private static volatile ArrayList<String> capFiles = null;

    /**
     * Parses the rules file named by the sun.net.http.captureRules net
     * property (if any) into the patterns/capFiles lists.  Marks the class
     * initialized up front so a missing or unreadable file is not retried.
     */
    private static synchronized void init() {
        initialized = true;
        String rulesFile = java.security.AccessController.doPrivileged(
            new java.security.PrivilegedAction<String>() {
                public String run() {
                    return NetProperties.get("sun.net.http.captureRules");
                }
            });
        if (rulesFile != null && !rulesFile.isEmpty()) {
            BufferedReader in;
            try {
                in = new BufferedReader(new FileReader(rulesFile));
            } catch (FileNotFoundException ex) {
                // no rules file: capture stays disabled
                return;
            }
            try {
                String line = in.readLine();
                while (line != null) {
                    line = line.trim();
                    // lines starting with # are comments and are skipped
                    if (!line.startsWith("#")) {
                        // a rule is "regex , captureFile"; anything else is ignored
                        String[] s = line.split(",");
                        if (s.length == 2) {
                            if (patterns == null) {
                                patterns = new ArrayList<Pattern>();
                                capFiles = new ArrayList<String>();
                            }
                            patterns.add(Pattern.compile(s[0].trim()));
                            capFiles.add(s[1].trim());
                        }
                    }
                    line = in.readLine();
                }
            } catch (IOException ioe) {
                // best-effort: a partially read rules file still yields
                // whatever rules were parsed before the failure
            } finally {
                try {
                    in.close();
                } catch (IOException ex) {
                }
            }
        }
    }

    private static synchronized boolean isInitialized() {
        return initialized;
    }

    /**
     * Opens (appending) the capture file and records the URL being captured.
     * A failure to open the file is logged, leaving {@code out} null.
     */
    private HttpCapture(File f, java.net.URL url) {
        file = f;
        try {
            out = new BufferedWriter(new FileWriter(file, true));
            out.write("URL: " + url + "\n");
        } catch (IOException ex) {
            PlatformLogger.getLogger(HttpCapture.class.getName()).severe(null, ex);
        }
    }

    /**
     * Logs one byte of outgoing traffic, first writing a direction-change
     * marker if the previous byte logged was incoming.
     */
    public synchronized void sent(int c) throws IOException {
        if (incoming) {
            out.write("\n------>\n");
            incoming = false;
            out.flush();
        }
        out.write(c);
    }

    /**
     * Logs one byte of incoming traffic, first writing a direction-change
     * marker if the previous byte logged was outgoing.
     */
    public synchronized void received(int c) throws IOException {
        if (!incoming) {
            out.write("\n<------\n");
            incoming = true;
            out.flush();
        }
        out.write(c);
    }

    public synchronized void flush() throws IOException {
        out.flush();
    }

    /**
     * Returns a new HttpCapture for the first rule whose regex matches the
     * given URL, or null when no rule matches (or no rules are configured).
     * A "%d" in the capture file name is replaced by a random int, retrying
     * until an unused file name is found.
     */
    public static HttpCapture getCapture(java.net.URL url) {
        if (!isInitialized()) {
            init();
        }
        if (patterns == null || patterns.isEmpty()) {
            return null;
        }
        String s = url.toString();
        for (int i = 0; i < patterns.size(); i++) {
            Pattern p = patterns.get(i);
            if (p.matcher(s).find()) {
                String f = capFiles.get(i);
                File fi;
                if (f.indexOf("%d") >= 0) {
                    java.util.Random rand = new java.util.Random();
                    do {
                        String f2 = f.replace("%d", Integer.toString(rand.nextInt()));
                        fi = new File(f2);
                    } while (fi.exists());
                } else {
                    fi = new File(f);
                }
                return new HttpCapture(fi, url);
            }
        }
        return null;
    }
}
|
||||
76
jdkSrc/jdk8/sun/net/www/http/HttpCaptureInputStream.java
Normal file
76
jdkSrc/jdk8/sun/net/www/http/HttpCaptureInputStream.java
Normal file
@@ -0,0 +1,76 @@
|
||||
/*
|
||||
* Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
package sun.net.www.http;
|
||||
import java.io.*;
|
||||
|
||||
/**
|
||||
* A Simple FilterInputStream subclass to capture HTTP traffic.
|
||||
* Every byte read is also passed to the HttpCapture class.
|
||||
*
|
||||
* @author jccollet
|
||||
*/
|
||||
public class HttpCaptureInputStream extends FilterInputStream {
|
||||
private HttpCapture capture = null;
|
||||
|
||||
public HttpCaptureInputStream(InputStream in, HttpCapture cap) {
|
||||
super(in);
|
||||
capture = cap;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read() throws IOException {
|
||||
int i = super.read();
|
||||
capture.received(i);
|
||||
return i;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
capture.flush();
|
||||
} catch (IOException iOException) {
|
||||
}
|
||||
super.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(byte[] b) throws IOException {
|
||||
int ret = super.read(b);
|
||||
for (int i = 0; i < ret; i++) {
|
||||
capture.received(b[i]);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(byte[] b, int off, int len) throws IOException {
|
||||
int ret = super.read(b, off, len);
|
||||
for (int i = 0; i < ret; i++) {
|
||||
capture.received(b[off+i]);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
73
jdkSrc/jdk8/sun/net/www/http/HttpCaptureOutputStream.java
Normal file
73
jdkSrc/jdk8/sun/net/www/http/HttpCaptureOutputStream.java
Normal file
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
* Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
package sun.net.www.http;
|
||||
import java.io.*;
|
||||
|
||||
/**
|
||||
* A Simple FilterOutputStream subclass to capture HTTP traffic.
|
||||
* Every byte written is also passed to the HttpCapture class.
|
||||
*
|
||||
* @author jccollet
|
||||
*/
|
||||
public class HttpCaptureOutputStream extends FilterOutputStream {
|
||||
private HttpCapture capture = null;
|
||||
|
||||
public HttpCaptureOutputStream(OutputStream out, HttpCapture cap) {
|
||||
super(out);
|
||||
capture = cap;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(int b) throws IOException {
|
||||
capture.sent(b);
|
||||
out.write(b);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(byte[] ba) throws IOException {
|
||||
for (byte b : ba) {
|
||||
capture.sent(b);
|
||||
}
|
||||
out.write(ba);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(byte[] b, int off, int len) throws IOException {
|
||||
for (int i = off; i < len; i++) {
|
||||
capture.sent(b[i]);
|
||||
}
|
||||
out.write(b, off, len);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void flush() throws IOException {
|
||||
try {
|
||||
capture.flush();
|
||||
} catch (IOException iOException) {
|
||||
}
|
||||
super.flush();
|
||||
}
|
||||
}
|
||||
1104
jdkSrc/jdk8/sun/net/www/http/HttpClient.java
Normal file
1104
jdkSrc/jdk8/sun/net/www/http/HttpClient.java
Normal file
File diff suppressed because it is too large
Load Diff
40
jdkSrc/jdk8/sun/net/www/http/Hurryable.java
Normal file
40
jdkSrc/jdk8/sun/net/www/http/Hurryable.java
Normal file
@@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package sun.net.www.http;
|
||||
|
||||
/**
 * A <code>Hurryable</code> is a class that has been instructed to complete
 * its input processing so as to make resource associated with that input
 * available to others (for example, so an underlying connection can be
 * returned to a keep-alive cache).
 */
public interface Hurryable {

    /**
     * Asks the stream to finish consuming its input now.
     *
     * @return a <code>boolean</code> indicating if the stream has been
     *         hurried or not.
     */
    boolean hurry();

}
|
||||
426
jdkSrc/jdk8/sun/net/www/http/KeepAliveCache.java
Normal file
426
jdkSrc/jdk8/sun/net/www/http/KeepAliveCache.java
Normal file
@@ -0,0 +1,426 @@
|
||||
/*
|
||||
* Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
package sun.net.www.http;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.NotSerializableException;
|
||||
import java.io.ObjectInputStream;
|
||||
import java.io.ObjectOutputStream;
|
||||
import java.net.URL;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
||||
import sun.security.action.GetIntegerAction;
|
||||
import sun.net.www.protocol.http.HttpURLConnection;
|
||||
import sun.util.logging.PlatformLogger;
|
||||
|
||||
/**
 * A class that implements a cache of idle Http connections for keep-alive.
 *
 * Maps a KeepAliveKey (protocol/host/port plus an optional extra object) to
 * a ClientVector of idle HttpClients.  A daemon "Keep-Alive-Timer" thread
 * periodically sweeps the cache and closes connections that have been idle
 * longer than their per-vector timeout.
 *
 * @author Stephen R. Pietrowicz (NCSA)
 * @author Dave Brown
 */
public class KeepAliveCache
    extends HashMap<KeepAliveKey, ClientVector>
    implements Runnable {
    private static final long serialVersionUID = -2937172892064557949L;

    // Keep alive time set according to priority specified here:
    // 1. If server specifies a time with a Keep-Alive header
    // 2. If user specifies a time with system property below
    // 3. Default values which depend on proxy vs server and whether
    //    a Connection: keep-alive header was sent by server

    // name suffixed with "server" or "proxy"
    private static final String keepAliveProp = "http.keepAlive.time.";

    private static final int userKeepAliveServer;
    private static final int userKeepAliveProxy;

    static final PlatformLogger logger = HttpURLConnection.getHttpLogger();

    /**
     * Reads the user-configured keep-alive timeout in seconds for the given
     * suffix ("server" or "proxy"); -1 means "not configured".  Anything
     * below -1 is clamped to -1 and treated as unset.
     */
    @SuppressWarnings("removal")
    static int getUserKeepAliveSeconds(String type) {
        int v = AccessController.doPrivileged(
            new GetIntegerAction(keepAliveProp+type, -1)).intValue();
        return v < -1 ? -1 : v;
    }

    static {
        userKeepAliveServer = getUserKeepAliveSeconds("server");
        userKeepAliveProxy = getUserKeepAliveSeconds("proxy");
    }

    /* maximum # keep-alive connections to maintain at once
     * This should be 2 by the HTTP spec, but because we don't support pipe-lining
     * a larger value is more appropriate. So we now set a default of 5, and the value
     * refers to the number of idle connections per destination (in the cache) only.
     * It can be reset by setting system property "http.maxConnections".
     */
    static final int MAX_CONNECTIONS = 5;
    // Lazily computed cached value of the http.maxConnections property.
    // NOTE(review): not volatile/synchronized — the race is benign (the
    // property may simply be read more than once) but worth confirming.
    static int result = -1;
    static int getMaxConnections() {
        if (result == -1) {
            result = AccessController.doPrivileged(
                new GetIntegerAction("http.maxConnections", MAX_CONNECTIONS))
                .intValue();
            if (result <= 0) {
                result = MAX_CONNECTIONS;
            }
        }
        return result;
    }

    // sleep interval in milliseconds between expiry sweeps in run()
    static final int LIFETIME = 5000;

    // the daemon sweeper thread; (re)started lazily by put()
    private Thread keepAliveTimer = null;

    /**
     * Constructor
     */
    public KeepAliveCache() {}

    /**
     * Register this URL and HttpClient (that supports keep-alive) with the cache
     * @param url The URL contains info about the host and port
     * @param http The HttpClient to be cached
     */
    public void put(final URL url, Object obj, HttpClient http) {
        // this method may need to close an HttpClient, either because
        // it is not cacheable, or because the cache is at its capacity.
        // In the latter case, we close the least recently used client.
        // The client to close is stored in oldClient, and is closed
        // after cacheLock is released.
        HttpClient oldClient = null;
        synchronized (this) {
            // (re)start the sweeper thread if it has never run or has exited
            boolean startThread = (keepAliveTimer == null);
            if (!startThread) {
                if (!keepAliveTimer.isAlive()) {
                    startThread = true;
                }
            }
            if (startThread) {
                // a dead timer means existing entries were never swept; drop them
                clear();
                /* Unfortunately, we can't always believe the keep-alive timeout we got
                 * back from the server. If I'm connected through a Netscape proxy
                 * to a server that sent me a keep-alive
                 * time of 15 sec, the proxy unilaterally terminates my connection
                 * The robustness to get around this is in HttpClient.parseHTTP()
                 */
                final KeepAliveCache cache = this;
                AccessController.doPrivileged(new PrivilegedAction<Void>() {
                    public Void run() {
                        // We want to create the Keep-Alive-Timer in the
                        // system threadgroup
                        ThreadGroup grp = Thread.currentThread().getThreadGroup();
                        ThreadGroup parent = null;
                        while ((parent = grp.getParent()) != null) {
                            grp = parent;
                        }

                        keepAliveTimer = new Thread(grp, cache, "Keep-Alive-Timer");
                        keepAliveTimer.setDaemon(true);
                        keepAliveTimer.setPriority(Thread.MAX_PRIORITY - 2);
                        // Set the context class loader to null in order to avoid
                        // keeping a strong reference to an application classloader.
                        keepAliveTimer.setContextClassLoader(null);
                        keepAliveTimer.start();
                        return null;
                    }
                });
            }

            KeepAliveKey key = new KeepAliveKey(url, obj);
            ClientVector v = super.get(key);

            if (v == null) {
                // Resolve the effective timeout in seconds.  Sentinels from
                // HttpClient.getKeepAliveTimeout(): 0 = no header seen,
                // -1 = header seen without a timeout value, -2 = do not cache.
                int keepAliveTimeout = http.getKeepAliveTimeout();
                if (keepAliveTimeout == 0) {
                    keepAliveTimeout = getUserKeepAlive(http.getUsingProxy());
                    if (keepAliveTimeout == -1) {
                        // same default for server and proxy
                        keepAliveTimeout = 5;
                    }
                } else if (keepAliveTimeout == -1) {
                    keepAliveTimeout = getUserKeepAlive(http.getUsingProxy());
                    if (keepAliveTimeout == -1) {
                        // different default for server and proxy
                        keepAliveTimeout = http.getUsingProxy() ? 60 : 5;
                    }
                } else if (keepAliveTimeout == -2) {
                    keepAliveTimeout = 0;
                }
                // at this point keepAliveTimeout is the number of seconds to keep
                // alive, which could be 0, if the user specified 0 for the property
                assert keepAliveTimeout >= 0;
                if (keepAliveTimeout == 0) {
                    oldClient = http;
                } else {
                    v = new ClientVector(keepAliveTimeout * 1000);
                    v.put(http);
                    super.put(key, v);
                }
            } else {
                oldClient = v.put(http);
            }
        }
        // close after releasing locks
        if (oldClient != null) {
            oldClient.closeServer();
        }
    }

    // returns the keep alive set by user in system property or -1 if not set
    private static int getUserKeepAlive(boolean isProxy) {
        return isProxy ? userKeepAliveProxy : userKeepAliveServer;
    }

    /* remove an obsolete HttpClient from its VectorCache */
    public synchronized void remove(HttpClient h, Object obj) {
        KeepAliveKey key = new KeepAliveKey(h.url, obj);
        ClientVector v = super.get(key);
        if (v != null) {
            v.remove(h);
            if (v.isEmpty()) {
                removeVector(key);
            }
        }
    }

    /* called by a clientVector thread when all its connections have timed out
     * and that vector of connections should be removed.
     */
    synchronized void removeVector(KeepAliveKey k) {
        super.remove(k);
    }

    /**
     * Check to see if this URL has a cached HttpClient
     */
    public synchronized HttpClient get(URL url, Object obj) {
        KeepAliveKey key = new KeepAliveKey(url, obj);
        ClientVector v = super.get(key);
        if (v == null) { // nothing in cache yet
            return null;
        }
        return v.get();
    }

    /* Sleeps for an alloted timeout, then checks for timed out connections.
     * Errs on the side of caution (leave connections idle for a relatively
     * short time).
     */
    @Override
    public void run() {
        do {
            try {
                Thread.sleep(LIFETIME);
            } catch (InterruptedException e) {}
            List<HttpClient> closeList = null;

            // Remove all outdated HttpClients.
            synchronized (this) {
                long currentTime = System.currentTimeMillis();
                List<KeepAliveKey> keysToRemove = new ArrayList<>();

                for (KeepAliveKey key : keySet()) {
                    ClientVector v = get(key);
                    synchronized (v) {
                        // entries are LIFO: the oldest sit at the tail,
                        // so sweep from the back until a fresh one is found
                        KeepAliveEntry e = v.peekLast();
                        while (e != null) {
                            if ((currentTime - e.idleStartTime) > v.nap) {
                                v.pollLast();
                                if (closeList == null) {
                                    closeList = new ArrayList<>();
                                }
                                closeList.add(e.hc);
                            } else {
                                break;
                            }
                            e = v.peekLast();
                        }

                        if (v.isEmpty()) {
                            keysToRemove.add(key);
                        }
                    }
                }

                for (KeepAliveKey key : keysToRemove) {
                    removeVector(key);
                }
            }
            // close connections outside cacheLock
            if (closeList != null) {
                for (HttpClient hc : closeList) {
                    hc.closeServer();
                }
            }
        } while (!isEmpty());
    }

    /*
     * Do not serialize this class!
     */
    private void writeObject(ObjectOutputStream stream) throws IOException {
        throw new NotSerializableException();
    }

    private void readObject(ObjectInputStream stream)
        throws IOException, ClassNotFoundException
    {
        throw new NotSerializableException();
    }
}
|
||||
|
||||
/* LIFO order for reusing HttpClients. Most recent entries at the front.
 * If > maxConns are in use, discard oldest.
 */
class ClientVector extends ArrayDeque<KeepAliveEntry> {
    private static final long serialVersionUID = -8680532108106489459L;

    // idle timeout for this vector's connections, in milliseconds;
    // entries idle longer than this are considered stale
    int nap;

    ClientVector(int nap) {
        this.nap = nap;
    }

    /* Return a still valid, unused HttpClient, or null if the vector is
     * empty or its freshest entry has already gone stale. */
    synchronized HttpClient get() {
        // check the most recent connection, use if still valid
        KeepAliveEntry e = peekFirst();
        if (e == null) {
            return null;
        }

        long currentTime = System.currentTimeMillis();
        if ((currentTime - e.idleStartTime) > nap) {
            return null; // all connections stale - will be cleaned up later
        } else {
            pollFirst();
            if (KeepAliveCache.logger.isLoggable(PlatformLogger.Level.FINEST)) {
                String msg = "cached HttpClient was idle for "
                    + Long.toString(currentTime - e.idleStartTime);
                KeepAliveCache.logger.finest(msg);
            }
            return e.hc;
        }
    }

    /* Cache h at the front of the deque.  If the vector is already at
     * capacity, the oldest entry is evicted and its client is RETURNED so
     * the caller can close it after releasing all locks.
     * (The comment previously here described get(), not put().) */
    synchronized HttpClient put(HttpClient h) {
        HttpClient staleClient = null;
        assert KeepAliveCache.getMaxConnections() > 0;
        if (size() >= KeepAliveCache.getMaxConnections()) {
            // remove oldest connection
            staleClient = removeLast().hc;
        }
        addFirst(new KeepAliveEntry(h, System.currentTimeMillis()));
        // close after releasing the locks
        return staleClient;
    }

    /* remove an HttpClient (compared by identity, not equals) */
    synchronized boolean remove(HttpClient h) {
        for (KeepAliveEntry curr : this) {
            if (curr.hc == h) {
                return super.remove(curr);
            }
        }
        return false;
    }

    /*
     * Do not serialize this class!
     */
    private void writeObject(ObjectOutputStream stream) throws IOException {
        throw new NotSerializableException();
    }

    private void readObject(ObjectInputStream stream)
        throws IOException, ClassNotFoundException
    {
        throw new NotSerializableException();
    }
}
|
||||
|
||||
class KeepAliveKey {
|
||||
private final String protocol;
|
||||
private final String host;
|
||||
private final int port;
|
||||
private final Object obj; // additional key, such as socketfactory
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*
|
||||
* @param url the URL containing the protocol, host and port information
|
||||
*/
|
||||
public KeepAliveKey(URL url, Object obj) {
|
||||
this.protocol = url.getProtocol();
|
||||
this.host = url.getHost();
|
||||
this.port = url.getPort();
|
||||
this.obj = obj;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether or not two objects of this type are equal
|
||||
*/
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ((obj instanceof KeepAliveKey) == false)
|
||||
return false;
|
||||
KeepAliveKey kae = (KeepAliveKey)obj;
|
||||
return host.equals(kae.host)
|
||||
&& (port == kae.port)
|
||||
&& protocol.equals(kae.protocol)
|
||||
&& this.obj == kae.obj;
|
||||
}
|
||||
|
||||
/**
|
||||
* The hashCode() for this object is the string hashCode() of
|
||||
* concatenation of the protocol, host name and port.
|
||||
*/
|
||||
@Override
|
||||
public int hashCode() {
|
||||
String str = protocol+host+port;
|
||||
return this.obj == null? str.hashCode() :
|
||||
str.hashCode() + this.obj.hashCode();
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Pairing of an idle HttpClient with the timestamp (milliseconds, from
 * System.currentTimeMillis()) at which it went idle; ClientVector uses the
 * timestamp to decide when the connection has gone stale.
 */
class KeepAliveEntry {
    final HttpClient hc;
    final long idleStartTime;

    KeepAliveEntry(HttpClient hc, long idleStartTime) {
        this.hc = hc;
        this.idleStartTime = idleStartTime;
    }
}
|
||||
56
jdkSrc/jdk8/sun/net/www/http/KeepAliveCleanerEntry.java
Normal file
56
jdkSrc/jdk8/sun/net/www/http/KeepAliveCleanerEntry.java
Normal file
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
* Copyright (c) 1996, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
package sun.net.www.http;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
class KeepAliveCleanerEntry
|
||||
{
|
||||
KeepAliveStream kas;
|
||||
HttpClient hc;
|
||||
|
||||
public KeepAliveCleanerEntry(KeepAliveStream kas, HttpClient hc) {
|
||||
this.kas = kas;
|
||||
this.hc = hc;
|
||||
}
|
||||
|
||||
protected KeepAliveStream getKeepAliveStream() {
|
||||
return kas;
|
||||
}
|
||||
|
||||
protected HttpClient getHttpClient() {
|
||||
return hc;
|
||||
}
|
||||
|
||||
protected void setQueuedForCleanup() {
|
||||
kas.queuedForCleanup = true;
|
||||
}
|
||||
|
||||
protected boolean getQueuedForCleanup() {
|
||||
return kas.queuedForCleanup;
|
||||
}
|
||||
|
||||
}
|
||||
205
jdkSrc/jdk8/sun/net/www/http/KeepAliveStream.java
Normal file
205
jdkSrc/jdk8/sun/net/www/http/KeepAliveStream.java
Normal file
@@ -0,0 +1,205 @@
|
||||
/*
|
||||
* Copyright (c) 1996, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
package sun.net.www.http;
|
||||
|
||||
import java.io.*;
|
||||
import sun.net.ProgressSource;
|
||||
import sun.net.www.MeteredStream;
|
||||
|
||||
/**
|
||||
* A stream that has the property of being able to be kept alive for
|
||||
* multiple downloads from the same server.
|
||||
*
|
||||
* @author Stephen R. Pietrowicz (NCSA)
|
||||
* @author Dave Brown
|
||||
*/
|
||||
public
class KeepAliveStream extends MeteredStream implements Hurryable {

    // instance variables
    // Client whose underlying connection may be returned to the keep-alive
    // cache once this stream's body has been fully consumed.
    HttpClient hc;

    // true once hurry() has copied the remaining body into an in-memory buffer.
    boolean hurried;

    // has this KeepAliveStream been put on the queue for asynchronous cleanup.
    protected boolean queuedForCleanup = false;

    // Shared FIFO of streams awaiting asynchronous draining, and the single
    // daemon thread servicing it (started lazily; restarted if it has died).
    private static final KeepAliveStreamCleaner queue = new KeepAliveStreamCleaner();
    private static Thread cleanerThread; // null

    /**
     * Constructor
     *
     * @param is       underlying socket input stream
     * @param pi       progress source for download tracking (may be null)
     * @param expected total number of body bytes expected on this stream
     * @param hc       client owning the connection
     */
    public KeepAliveStream(InputStream is, ProgressSource pi, long expected, HttpClient hc) {
        super(is, pi, expected);
        this.hc = hc;
    }

    /**
     * Attempt to cache this connection: drain any unread body bytes so the
     * socket can be reused. Drains synchronously only if it won't block;
     * otherwise either queues the stream for asynchronous cleanup (small
     * remainders) or closes the server connection outright.
     */
    public void close() throws IOException {
        // If the inputstream is closed already, just return.
        if (closed) {
            return;
        }

        // If this stream has already been queued for cleanup.
        if (queuedForCleanup) {
            return;
        }

        // Skip past the data that's left in the Inputstream because
        // some sort of error may have occurred.
        // Do this ONLY if the skip won't block. The stream may have
        // been closed at the beginning of a big file and we don't want
        // to hang around for nothing. So if we can't skip without blocking
        // we just close the socket and, therefore, terminate the keepAlive
        // NOTE: Don't close super class
        try {
            if (expected > count) {
                long nskip = expected - count;
                if (nskip <= available()) {
                    // Remainder is already buffered: skip it without blocking.
                    do {} while ((nskip = (expected - count)) > 0L
                        && skip(Math.min(nskip, available())) > 0L);
                } else if (expected <= KeepAliveStreamCleaner.MAX_DATA_REMAINING && !hurried) {
                    // put this KeepAliveStream on the queue so that the data
                    // remaining on the socket can be cleaned up asynchronously.
                    queueForCleanup(new KeepAliveCleanerEntry(this, hc));
                } else {
                    // Too much left to drain cheaply: give up on keep-alive.
                    hc.closeServer();
                }
            }
            // Fully consumed synchronously: hand the connection back for reuse.
            if (!closed && !hurried && !queuedForCleanup) {
                hc.finished();
            }
        } finally {
            if (pi != null)
                pi.finishTracking();

            if (!queuedForCleanup) {
                // nulling out the underlying inputstream as well as
                // httpClient to let gc collect the memories faster
                in = null;
                hc = null;
                closed = true;
            }
        }
    }

    /* we explicitly do not support mark/reset */

    public boolean markSupported() {
        return false;
    }

    public void mark(int limit) {}

    public void reset() throws IOException {
        throw new IOException("mark/reset not supported");
    }

    /**
     * Buffer the remainder of the body in memory so the socket can be
     * released early. Succeeds only if all remaining bytes are already
     * available without blocking; on success the underlying stream is
     * replaced by a ByteArrayInputStream over the buffered bytes.
     *
     * @return true if the remaining data was buffered, false otherwise
     */
    public synchronized boolean hurry() {
        try {
            /* CASE 0: we're actually already done */
            if (closed || count >= expected) {
                return false;
            } else if (in.available() < (expected - count)) {
                /* CASE I: can't meet the demand */
                return false;
            } else {
                /* CASE II: fill our internal buffer
                 * Remind: possibly check memory here
                 */
                int size = (int) (expected - count);
                byte[] buf = new byte[size];
                DataInputStream dis = new DataInputStream(in);
                dis.readFully(buf);
                in = new ByteArrayInputStream(buf);
                hurried = true;
                return true;
            }
        } catch (IOException e) {
            // e.printStackTrace();
            return false;
        }
    }

    /**
     * Enqueue the entry for asynchronous draining and ensure the single
     * cleaner daemon thread is running. If the queue is full the server
     * connection is closed instead. Synchronized on the shared queue.
     */
    private static void queueForCleanup(KeepAliveCleanerEntry kace) {
        synchronized(queue) {
            if(!kace.getQueuedForCleanup()) {
                if (!queue.offer(kace)) {
                    // Queue at capacity: don't keep this connection alive.
                    kace.getHttpClient().closeServer();
                    return;
                }

                kace.setQueuedForCleanup();
                queue.notifyAll();
            }

            // (Re)start the cleaner thread if it has never run or has died.
            boolean startCleanupThread = (cleanerThread == null);
            if (!startCleanupThread) {
                if (!cleanerThread.isAlive()) {
                    startCleanupThread = true;
                }
            }

            if (startCleanupThread) {
                java.security.AccessController.doPrivileged(
                    new java.security.PrivilegedAction<Void>() {
                    public Void run() {
                        // We want to create the Keep-Alive-SocketCleaner in the
                        // system threadgroup
                        ThreadGroup grp = Thread.currentThread().getThreadGroup();
                        ThreadGroup parent = null;
                        while ((parent = grp.getParent()) != null) {
                            grp = parent;
                        }

                        cleanerThread = new Thread(grp, queue, "Keep-Alive-SocketCleaner");
                        cleanerThread.setDaemon(true);
                        cleanerThread.setPriority(Thread.MAX_PRIORITY - 2);
                        // Set the context class loader to null in order to avoid
                        // keeping a strong reference to an application classloader.
                        cleanerThread.setContextClassLoader(null);
                        cleanerThread.start();
                        return null;
                    }
                });
            }
        } // queue
    }

    /** @return number of body bytes not yet consumed from this stream */
    protected long remainingToRead() {
        return expected - count;
    }

    /** Marks the stream closed and drops references to aid GC (used by the cleaner). */
    protected void setClosed() {
        in = null;
        hc = null;
        closed = true;
    }
}
|
||||
155
jdkSrc/jdk8/sun/net/www/http/KeepAliveStreamCleaner.java
Normal file
155
jdkSrc/jdk8/sun/net/www/http/KeepAliveStreamCleaner.java
Normal file
@@ -0,0 +1,155 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
package sun.net.www.http;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.LinkedList;
|
||||
import sun.net.NetProperties;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
|
||||
/**
|
||||
* This class is used to cleanup any remaining data that may be on a KeepAliveStream
|
||||
* so that the connection can be cached in the KeepAliveCache.
|
||||
* Instances of this class can be used as a FIFO queue for KeepAliveCleanerEntry objects.
|
||||
* Executing this Runnable removes each KeepAliveCleanerEntry from the Queue, reads
|
||||
 * the remaining bytes on its KeepAliveStream, and if successful puts the connection in
|
||||
* the KeepAliveCache.
|
||||
*
|
||||
* @author Chris Hegarty
|
||||
*/
|
||||
|
||||
@SuppressWarnings("serial") // never serialized
class KeepAliveStreamCleaner
    extends LinkedList<KeepAliveCleanerEntry>
    implements Runnable
{
    // maximum amount of remaining data that we will try to cleanup
    // (overridable via the http.KeepAlive.remainingData property, in KB)
    protected static int MAX_DATA_REMAINING = 512;

    // maximum amount of KeepAliveStreams to be queued
    // (overridable via the http.KeepAlive.queuedConnections property)
    protected static int MAX_CAPACITY = 10;

    // timeout for both socket and poll on the queue
    protected static final int TIMEOUT = 5000;

    // max retries for skipping data
    private static final int MAX_RETRIES = 5;

    static {
        // Read both tunables once at class-load time, under doPrivileged
        // since NetProperties access may be security-checked.
        final String maxDataKey = "http.KeepAlive.remainingData";
        int maxData = AccessController.doPrivileged(
            new PrivilegedAction<Integer>() {
                public Integer run() {
                    return NetProperties.getInteger(maxDataKey, MAX_DATA_REMAINING);
                }}).intValue() * 1024;
        MAX_DATA_REMAINING = maxData;

        final String maxCapacityKey = "http.KeepAlive.queuedConnections";
        int maxCapacity = AccessController.doPrivileged(
            new PrivilegedAction<Integer>() {
                public Integer run() {
                    return NetProperties.getInteger(maxCapacityKey, MAX_CAPACITY);
                }}).intValue();
        MAX_CAPACITY = maxCapacity;

    }

    /**
     * Bounded offer: rejects the entry (returns false) once the queue
     * holds MAX_CAPACITY items, otherwise delegates to LinkedList.
     */
    @Override
    public boolean offer(KeepAliveCleanerEntry e) {
        if (size() >= MAX_CAPACITY)
            return false;

        return super.offer(e);
    }

    /**
     * Cleaner-thread main loop: repeatedly takes an entry off this queue
     * (waiting up to TIMEOUT ms for one to appear, then exiting so the
     * thread can die when idle), drains the remaining bytes from its
     * KeepAliveStream, and on success returns the connection to the
     * keep-alive cache via hc.finished().
     */
    @Override
    public void run()
    {
        KeepAliveCleanerEntry kace = null;

        do {
            try {
                synchronized(this) {
                    // Timed wait loop guarding against spurious wakeups:
                    // track elapsed time so the total wait never exceeds TIMEOUT.
                    long before = System.currentTimeMillis();
                    long timeout = TIMEOUT;
                    while ((kace = poll()) == null) {
                        this.wait(timeout);

                        long after = System.currentTimeMillis();
                        long elapsed = after - before;
                        if (elapsed > timeout) {
                            /* one last try */
                            kace = poll();
                            break;
                        }
                        before = after;
                        timeout -= elapsed;
                    }
                }

                // Queue stayed empty for the full timeout: let the thread exit.
                if(kace == null)
                    break;

                KeepAliveStream kas = kace.getKeepAliveStream();

                if (kas != null) {
                    synchronized(kas) {
                        HttpClient hc = kace.getHttpClient();
                        try {
                            if (hc != null && !hc.isInKeepAliveCache()) {
                                // Bound the drain with a read timeout so a stalled
                                // peer can't hang the cleaner thread.
                                int oldTimeout = hc.getReadTimeout();
                                hc.setReadTimeout(TIMEOUT);
                                long remainingToRead = kas.remainingToRead();
                                if (remainingToRead > 0) {
                                    long n = 0;
                                    int retries = 0;
                                    // Skip until done, or until MAX_RETRIES
                                    // consecutive zero-byte skips suggest no progress.
                                    while (n < remainingToRead && retries < MAX_RETRIES) {
                                        remainingToRead = remainingToRead - n;
                                        n = kas.skip(remainingToRead);
                                        if (n == 0)
                                            retries++;
                                    }
                                    remainingToRead = remainingToRead - n;
                                }
                                if (remainingToRead == 0) {
                                    // Fully drained: restore timeout and cache the connection.
                                    hc.setReadTimeout(oldTimeout);
                                    hc.finished();
                                } else
                                    hc.closeServer();
                            }
                        } catch (IOException ioe) {
                            hc.closeServer();
                        } finally {
                            kas.setClosed();
                        }
                    }
                }
            } catch (InterruptedException ie) { } // interrupt ignored: daemon loop simply re-polls
        } while (kace != null);
    }
}
|
||||
105
jdkSrc/jdk8/sun/net/www/http/PosterOutputStream.java
Normal file
105
jdkSrc/jdk8/sun/net/www/http/PosterOutputStream.java
Normal file
@@ -0,0 +1,105 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
package sun.net.www.http;
|
||||
|
||||
import java.io.*;
|
||||
import java.net.*;
|
||||
|
||||
/**
|
||||
* Instances of this class are returned to applications for the purpose of
|
||||
* sending user data for a HTTP request (excluding TRACE). This class is used
|
||||
* when the content-length will be specified in the header of the request.
|
||||
* The semantics of ByteArrayOutputStream are extended so that
|
||||
* when close() is called, it is no longer possible to write
|
||||
* additional data to the stream. From this point the content length of
|
||||
* the request is fixed and cannot change.
|
||||
*
|
||||
* @author Michael McMahon
|
||||
*/
|
||||
|
||||
public class PosterOutputStream extends ByteArrayOutputStream {

    // Set once close() has been called; every subsequent mutation
    // (write/reset) becomes a silent no-op so the content length of the
    // request stays fixed.
    private boolean closed;

    /**
     * Creates a new output stream for POST user data, with a small
     * initial buffer that grows on demand.
     */
    public PosterOutputStream() {
        super(256);
    }

    /**
     * Writes the specified byte to this output stream, unless the
     * stream has already been closed (in which case the byte is
     * silently discarded).
     *
     * @param b the byte to be written.
     */
    @Override
    public synchronized void write(int b) {
        if (!closed) {
            super.write(b);
        }
    }

    /**
     * Writes <code>len</code> bytes from the specified byte array
     * starting at offset <code>off</code> to this output stream,
     * unless the stream has already been closed (in which case the
     * data is silently discarded).
     *
     * @param b   the data.
     * @param off the start offset in the data.
     * @param len the number of bytes to write.
     */
    @Override
    public synchronized void write(byte b[], int off, int len) {
        if (!closed) {
            super.write(b, off, len);
        }
    }

    /**
     * Discards all currently accumulated output so the stream (and its
     * already-allocated buffer) can be reused. Has no effect once the
     * stream has been closed.
     *
     * @see java.io.ByteArrayInputStream#count
     */
    @Override
    public synchronized void reset() {
        if (!closed) {
            super.reset();
        }
    }

    /**
     * Freezes the stream: after close() has been called it is no longer
     * possible to write to or reset this stream, so the request's
     * content length cannot change.
     */
    @Override
    public synchronized void close() throws IOException {
        closed = true;
        super.close();
    }
}
|
||||
Reference in New Issue
Block a user