3 changes: 3 additions & 0 deletions .gitattributes
@@ -0,0 +1,3 @@
# Mark all Jazzer inputs as binary, to avoid bytes in them being misinterpreted as line terminators and being
# changed on checkout
/src/test/resources/**/*Inputs/** binary
23 changes: 23 additions & 0 deletions .github/workflows/main.yml
@@ -42,3 +42,26 @@ jobs:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./target/site/jacoco/jacoco.xml
flags: unittests

# TODO: Consider caching the Jazzer `.cifuzz-corpus` directory if that improves fuzzing performance.
# However, the cached corpus could become outdated when fuzz test methods are changed.
fuzz:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up JDK
uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0
with:
distribution: 'temurin'
java-version: 17
cache: 'maven'
- name: Run tests
id: fuzz-tests
# Don't run with `-q`, to see fuzzing progress
run: ./mvnw -B -ff -ntp --activate-profiles fuzz test
- name: Upload fuzz test inputs
if: always() && steps.fuzz-tests.outcome == 'failure'
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: fuzz-test-inputs
path: src/test/resources/**/*Inputs/**
3 changes: 3 additions & 0 deletions .gitignore
@@ -17,3 +17,6 @@ test-output
server/logs
runtime
logs

# Jazzer fuzzing corpus
/.cifuzz-corpus/
2 changes: 2 additions & 0 deletions VERSION.txt
@@ -8,6 +8,8 @@
(contributed by @Marcono1234)
#64: Fix differences between big- and little-endian encoder
(contributed by @Marcono1234)
#68: Improve bounds checks
(contributed by @Marcono1234)
- Updated `oss-parent` dep to latest (v72)

1.1.3 (26-Sep-2025)
106 changes: 106 additions & 0 deletions pom.xml
@@ -76,6 +76,12 @@
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.code-intelligence</groupId>
<artifactId>jazzer-junit</artifactId>
<version>0.28.0</version>
<scope>test</scope>
</dependency>
</dependencies>

<build>
@@ -236,6 +242,106 @@
</build>

<profiles>
<!--
Profile for fuzzing: sets the JAZZER_FUZZ environment variable and configures the individual fuzz test methods to run.
Should be run separately from the regular build because it disables normal unit tests and only runs fuzz tests.
-->
<profile>
<id>fuzz</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<environmentVariables>
<!-- Enable Jazzer fuzzing mode, see https://github.com/CodeIntelligenceTesting/jazzer?tab=readme-ov-file#fuzzing-mode -->
<JAZZER_FUZZ>1</JAZZER_FUZZ>
</environmentVariables>
<failIfNoTests>true</failIfNoTests>
</configuration>
<!--
Currently have to list all fuzz tests separately, see
https://github.com/CodeIntelligenceTesting/jazzer/issues/599
-->
<executions>
<!-- Skip default execution -->
<execution>
<id>default-test</id>
<configuration>
<skipTests>true</skipTests>
</configuration>
</execution>
<!-- Explicitly list separate fuzz test methods -->
<execution>
<id>fuzz-TestFuzzUnsafeLZF#decode</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<test>TestFuzzUnsafeLZF#decode</test>
</configuration>
</execution>
<execution>
<id>fuzz-TestFuzzUnsafeLZF#roundtrip</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<test>TestFuzzUnsafeLZF#roundtrip</test>
</configuration>
</execution>
<execution>
<id>fuzz-TestFuzzUnsafeLZF#encode</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<test>TestFuzzUnsafeLZF#encode</test>
</configuration>
</execution>
<execution>
<id>fuzz-TestFuzzUnsafeLZF#encodeAppend</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<test>TestFuzzUnsafeLZF#encodeAppend</test>
</configuration>
</execution>
<execution>
<id>fuzz-TestFuzzUnsafeLZF#inputStreamRead</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<test>TestFuzzUnsafeLZF#inputStreamRead</test>
</configuration>
</execution>
<execution>
<id>fuzz-TestFuzzUnsafeLZF#inputStreamSkip</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<test>TestFuzzUnsafeLZF#inputStreamSkip</test>
</configuration>
</execution>
<execution>
<id>fuzz-TestFuzzUnsafeLZF#outputStream</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<test>TestFuzzUnsafeLZF#outputStream</test>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>

<profile>
<id>release-sign-artifacts</id>
<activation>
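
The executions above list the individual fuzz test methods of TestFuzzUnsafeLZF, a class not shown in this diff. A minimal, hypothetical sketch of what two of those Jazzer JUnit fuzz tests might look like; the @FuzzTest annotation comes from the jazzer-junit dependency added above, and using LZFEncoder/LZFDecoder as the entry points is an assumption, not taken from this PR:

// Hypothetical sketch only -- the real TestFuzzUnsafeLZF is not part of this diff
import java.util.Arrays;

import com.code_intelligence.jazzer.junit.FuzzTest;
import com.ning.compress.lzf.LZFDecoder;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.LZFException;

class TestFuzzUnsafeLZF {

    @FuzzTest
    void decode(byte[] data) {
        try {
            // Arbitrary (possibly corrupt) input must either decode or fail with
            // LZFException -- never crash the JVM or corrupt memory
            LZFDecoder.decode(data);
        } catch (LZFException expected) {
            // acceptable outcome for malformed input
        }
    }

    @FuzzTest
    void roundtrip(byte[] data) throws LZFException {
        // Compressing and then decompressing arbitrary input must restore it exactly
        byte[] compressed = LZFEncoder.encode(data);
        byte[] restored = LZFDecoder.decode(compressed);
        if (!Arrays.equals(data, restored)) {
            throw new AssertionError("Round-trip mismatch for input of length " + data.length);
        }
    }
}

With the profile above, `./mvnw --activate-profiles fuzz test` runs each listed method as a separate Surefire execution with JAZZER_FUZZ=1.
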
12 changes: 5 additions & 7 deletions src/main/java/com/ning/compress/BufferRecycler.java
@@ -20,9 +20,8 @@ public final class BufferRecycler
* to a {@link BufferRecycler} used to provide a low-cost
* buffer recycling for buffers we need for encoding, decoding.
*/
final protected static ThreadLocal<SoftReference<BufferRecycler>> _recyclerRef
= new ThreadLocal<SoftReference<BufferRecycler>>();

final protected static ThreadLocal<SoftReference<BufferRecycler>> _recyclerRef
= new ThreadLocal<SoftReference<BufferRecycler>>();

private byte[] _inputBuffer;
private byte[] _outputBuffer;
@@ -31,7 +30,7 @@ public final class BufferRecycler
private byte[] _encodingBuffer;

private int[] _encodingHash;

/**
* Accessor to get thread-local recycler instance
*/
@@ -51,7 +50,7 @@ public static BufferRecycler instance()
// Buffers for encoding (output)
///////////////////////////////////////////////////////////////////////
*/

public byte[] allocEncodingBuffer(int minSize)
{
byte[] buf = _encodingBuffer;
@@ -69,7 +68,7 @@ public void releaseEncodeBuffer(byte[] buffer)
_encodingBuffer = buffer;
}
}

public byte[] allocOutputBuffer(int minSize)
{
byte[] buf = _outputBuffer;
@@ -147,5 +146,4 @@ public void releaseDecodeBuffer(byte[] buffer)
_decodingBuffer = buffer;
}
}

}
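
The changes to this file are whitespace-only. For context on the recycling pattern the class javadoc describes, here is a minimal usage sketch; it is hypothetical and uses only methods visible in this diff:

// Hypothetical usage sketch, not part of this PR
import com.ning.compress.BufferRecycler;

final class RecyclerExample {

    static void decodeWithScratchBuffer(int minSize) {
        // Per-thread instance, held behind a SoftReference so it can be reclaimed under memory pressure
        BufferRecycler recycler = BufferRecycler.instance();
        byte[] buf = recycler.allocDecodeBuffer(minSize);
        try {
            // ... fill and consume buf as a scratch decode buffer ...
        } finally {
            // hand the buffer back so later work on this thread can reuse it
            recycler.releaseDecodeBuffer(buf);
        }
    }
}
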
36 changes: 29 additions & 7 deletions src/main/java/com/ning/compress/lzf/ChunkDecoder.java
@@ -34,9 +34,7 @@ public ChunkDecoder() { }
*/
public final byte[] decode(final byte[] inputBuffer) throws LZFException
{
byte[] result = new byte[calculateUncompressedSize(inputBuffer, 0, inputBuffer.length)];
decode(inputBuffer, 0, inputBuffer.length, result);
return result;
return decode(inputBuffer, 0, inputBuffer.length);
}

/**
@@ -49,7 +47,10 @@ public final byte[] decode(final byte[] inputBuffer) throws LZFException
public final byte[] decode(final byte[] inputBuffer, int inputPtr, int inputLen) throws LZFException
{
byte[] result = new byte[calculateUncompressedSize(inputBuffer, inputPtr, inputLen)];
decode(inputBuffer, inputPtr, inputLen, result);
int decodedLen = decode(inputBuffer, inputPtr, inputLen, result);
if (decodedLen != result.length) {
throw new LZFException("Bad `decode()`: decodedLen="+decodedLen+" != "+result.length+" (expected)");
}
return result;
}

@@ -78,9 +79,9 @@ public int decode(final byte[] sourceBuffer, int inPtr, int inLength,
int outPtr = 0;
int blockNr = 0;

final int end = inPtr + inLength - 1; // -1 to offset possible end marker
final int endMinusOne = inPtr + inLength - 1; // -1 to offset possible end marker

while (inPtr < end) {
while (inPtr < endMinusOne) {
// let's do basic sanity checks; no point in skimping with these checks
if (sourceBuffer[inPtr] != LZFChunk.BYTE_Z || sourceBuffer[inPtr+1] != LZFChunk.BYTE_V) {
throw new LZFException("Corrupt input data, block #"+blockNr+" (at offset "+inPtr+"): did not start with 'ZV' signature bytes");
@@ -101,10 +102,15 @@
_reportArrayOverflow(targetBuffer, outPtr, uncompLen);
}
inPtr += 2;
decodeChunk(sourceBuffer, inPtr, targetBuffer, outPtr, outPtr+uncompLen);
decodeChunk(sourceBuffer, inPtr, inPtr + len, targetBuffer, outPtr, outPtr+uncompLen);
outPtr += uncompLen;
}
inPtr += len;

// Fail if more input than expected was consumed, i.e. if `inLength` does not cover the full block
if (inPtr > endMinusOne + 1) {
throw new LZFException("Corrupt input data, block #" + blockNr + " is incomplete");
}
++blockNr;
}
return outPtr;
@@ -125,10 +131,26 @@ public abstract int decodeChunk(final InputStream is, final byte[] inputBuffer,

/**
* Main decode method for individual chunks.
*
* @deprecated Since 1.2; use {@link #decodeChunk(byte[], int, int, byte[], int, int)} instead
*/
@Deprecated // @since 1.2
public abstract void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int outEnd)
throws LZFException;

/**
* Main decode method for individual chunks.
*
* <p>For backward compatibility, this default implementation delegates to {@link #decodeChunk(byte[], int, byte[], int, int)},
* ignoring the {@code inEnd} parameter. Subclasses should override this method and take {@code inEnd} into account.
*
* @since 1.2
*/
public void decodeChunk(byte[] in, int inPos, int inEnd, byte[] out, int outPos, int outEnd)
throws LZFException {
decodeChunk(in, inPos, out, outPos, outEnd);
}

/**
* @return If positive number, number of bytes skipped; if -1, end-of-stream was
* reached; otherwise, amount of content
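
As a caller-side illustration of the new bounds-aware overload (mirroring how LZFUncompressor is updated below), here is a hypothetical helper; only the ChunkDecoder API shown in this diff is assumed:

// Hypothetical helper, not library code; shows migrating from the deprecated
// 5-argument decodeChunk() to the new 6-argument overload that also receives
// the end offset of the compressed payload
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFException;

final class ChunkDecodeHelper {

    private ChunkDecodeHelper() { }

    // Decodes one chunk whose compressed payload occupies src[srcOffset .. srcOffset+compLen)
    // into dst[0 .. uncompLen)
    static void decodeOne(ChunkDecoder decoder, byte[] src, int srcOffset, int compLen,
            byte[] dst, int uncompLen) throws LZFException {
        // Before: decoder.decodeChunk(src, srcOffset, dst, 0, uncompLen);
        // After: also pass the end of the compressed payload, so the decoder can
        // reject chunks that would read past the declared compressed length
        decoder.decodeChunk(src, srcOffset, srcOffset + compLen, dst, 0, uncompLen);
    }
}
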
2 changes: 1 addition & 1 deletion src/main/java/com/ning/compress/lzf/LZFUncompressor.java
@@ -327,7 +327,7 @@ private final void _uncompress(byte[] src, int srcOffset, int len) throws IOExce
if (_decodeBuffer == null) {
_decodeBuffer = _recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
}
_decoder.decodeChunk(src, srcOffset, _decodeBuffer, 0, _uncompressedLength);
_decoder.decodeChunk(src, srcOffset, srcOffset + len, _decodeBuffer, 0, _uncompressedLength);
_handler.handleData(_decodeBuffer, 0, _uncompressedLength);
}

29 changes: 23 additions & 6 deletions src/main/java/com/ning/compress/lzf/impl/UnsafeChunkDecoder.java
@@ -63,27 +63,35 @@ public final int decodeChunk(final InputStream is, final byte[] inputBuffer, fin
// compressed
readFully(is, true, inputBuffer, 0, 2+compLen); // first 2 bytes are uncompressed length
int uncompLen = uint16(inputBuffer, 0);
decodeChunk(inputBuffer, 2, outputBuffer, 0, uncompLen);
decodeChunk(inputBuffer, 2, 2 + compLen, outputBuffer, 0, uncompLen);
return uncompLen;
}


@Override
public void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int outEnd) throws LZFException {
decodeChunk(in, inPos, in.length, out, outPos, outEnd);
}

@Override
public final void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int outEnd)
public final void decodeChunk(byte[] in, int inPos, int inEnd, byte[] out, int outPos, int outEnd)
throws LZFException
{
// Sanity checks; otherwise, if any of the arguments are invalid, `Unsafe` might corrupt memory
checkArrayIndices(in, inPos, in.length);
checkArrayIndices(in, inPos, inEnd);
checkArrayIndices(out, outPos, outEnd);

final int outPosStart = outPos;

// We need to take care of end condition, leave last 32 bytes out
final int inputEnd32 = inEnd - 32;
final int outputEnd8 = outEnd - 8;
final int outputEnd32 = outEnd - 32;

main_loop:
do {
int ctrl = in[inPos++] & 255;
while (ctrl < LZFChunk.MAX_LITERAL) { // literal run(s)
if (outPos > outputEnd32) {
if (outPos > outputEnd32 || inPos > inputEnd32) {
System.arraycopy(in, inPos, out, outPos, ctrl+1);
} else {
copyUpTo32(in, inPos, out, outPos, ctrl);
@@ -103,6 +111,9 @@ public final void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int
if (len < 7) {
ctrl -= in[inPos++] & 255;
if (ctrl < -7 && outPos < outputEnd8) { // non-overlapping? can use efficient bulk copy
if (outPos + ctrl < outPosStart) {
throw new LZFException("Invalid back reference");
}
final long rawOffset = BYTE_ARRAY_OFFSET + outPos;
unsafe.putLong(out, rawOffset, unsafe.getLong(out, rawOffset + ctrl));
// moveLong(out, outPos, outEnd, ctrl);
@@ -122,6 +133,9 @@ public final void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int
continue;
}
// but non-overlapping is simple
if (outPos + ctrl < outPosStart) {
throw new LZFException("Invalid back reference");
}
if (len <= 32) {
copyUpTo32(out, outPos+ctrl, outPos, len-1);
outPos += len;
@@ -132,6 +146,9 @@ public final void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int
} while (outPos < outEnd);

// sanity check to guard against corrupt data:
if (inPos != inEnd) {
throw new LZFException("Corrupt data: unexpected input amount was consumed");
}
if (outPos != outEnd) {
throw new LZFException("Corrupt data: overrun in decompress, input offset "+inPos+", output offset "+outPos);
}
@@ -170,7 +187,7 @@ public int skipOrDecodeChunk(final InputStream is, final byte[] inputBuffer,
}
// otherwise, read and uncompress the chunk normally
readFully(is, true, inputBuffer, 2, compLen); // first 2 bytes are uncompressed length
decodeChunk(inputBuffer, 2, outputBuffer, 0, uncompLen);
decodeChunk(inputBuffer, 2, 2 + compLen, outputBuffer, 0, uncompLen);
return -(uncompLen+1);
}

Expand Down