diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java
index 41b068d01a..7674cab757 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java
@@ -171,7 +171,9 @@ public long getDataSize() {
 
   private void checkBlockSizeReached() throws IOException {
     if (recordCount >= rowGroupRecordCountThreshold) {
-      LOG.debug("record count reaches threshold: flushing {} records to disk.", recordCount);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("record count reaches threshold: flushing {} records to disk.", recordCount);
+      }
       flushRowGroupToStore();
       initStore();
       recordCountForNextMemCheck = min(
@@ -185,7 +187,9 @@ private void checkBlockSizeReached() throws IOException {
       // flush the row group if it is within ~2 records of the limit
       // it is much better to be slightly under size than to be over at all
       if (memSize > (nextRowGroupSize - 2 * recordSize)) {
-        LOG.debug("mem size {} > {}: flushing {} records to disk.", memSize, nextRowGroupSize, recordCount);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("mem size {} > {}: flushing {} records to disk.", memSize, nextRowGroupSize, recordCount);
+        }
         flushRowGroupToStore();
         initStore();
         recordCountForNextMemCheck = min(
@@ -201,7 +205,9 @@ private void checkBlockSizeReached() throws IOException {
             max(props.getMinRowCountForPageSizeCheck(), recordCount / 2),
             recordCount + props.getMaxRowCountForPageSizeCheck() // will not look more than max records ahead
             );
-        LOG.debug("Checked mem at {} will check again at: {}", recordCount, recordCountForNextMemCheck);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Checked mem at {} will check again at: {}", recordCount, recordCountForNextMemCheck);
+        }
       }
     }
   }