Fix a bug in the aggregation process when no filter is given (#377)

This commit is contained in:
aoei.me 2018-05-06 10:15:06 +08:00 committed by XuYi
parent 25254be661
commit 03dc388a7f
3 changed files with 26 additions and 5 deletions

View File

@ -118,6 +118,8 @@ public class OverflowQueryEngine {
if (aggregateFunction.resultData.timeLength == 0) {
aggregateFunction.putDefaultValue();
}
LOGGER.debug(String.format("key %s, data length %s, empty data length %s", EngineUtils.aggregationKey(aggregateFunction, pair.left),
aggregateFunction.resultData.timeLength, aggregateFunction.resultData.emptyTimeLength));
ansQueryDataSet.mapRet.put(EngineUtils.aggregationKey(aggregateFunction, pair.left), aggregateFunction.resultData);
}
// aggregateThreadLocal.set(ansQueryDataSet);

View File

@ -231,7 +231,8 @@ public class AggregateRecordReader extends RecordReader {
valueReader.setDecoder(Decoder.getDecoderByType(pageHeader.getData_page_header().getEncoding(), dataType));
result = ReaderUtils.readOnePage(dataType, timestamps, valueReader.decoder, page, result,
queryTimeFilter, queryValueFilter, insertMemoryData, overflowOperationReaderCopy);
func.calculateValueFromDataPage(result);
if (result.valueLength > 0)
func.calculateValueFromDataPage(result);
result.clearData();
}
}

View File

@ -66,9 +66,9 @@ public class LargeDataTest {
pageSizeInByte = tsFileConfig.pageSizeInByte;
groupSizeInByte = tsFileConfig.groupSizeInByte;
// new value
tsFileConfig.maxNumberOfPointsInPage = 100;
tsFileConfig.pageSizeInByte = 1024 * 1024 * 15;
tsFileConfig.groupSizeInByte = 1024 * 1024 * 100;
tsFileConfig.maxNumberOfPointsInPage = 1000;
tsFileConfig.pageSizeInByte = 1024 * 1024 * 150;
tsFileConfig.groupSizeInByte = 1024 * 1024 * 1000;
deamon = IoTDB.getInstance();
deamon.active();
@ -425,6 +425,24 @@ public class LargeDataTest {
assertEquals(1, cnt);
statement.close();
// (3). aggregation test : there is no value in series d1.s0 in the given time range
sql = "select max_value(s0),min_value(s0)" +
"from root.vehicle.d0 where time > 13601 and time < 13602";
statement = connection.createStatement();
hasResultSet = statement.execute(sql);
Assert.assertTrue(hasResultSet);
resultSet = statement.getResultSet();
cnt = 0;
while (resultSet.next()) {
String ans = resultSet.getString(TIMESTAMP_STR)
+ "," + resultSet.getString(max_value(d0s0)) + "," + resultSet.getString(min_value(d0s0));
assertEquals("0,null,null", ans);
//System.out.println(".." + ans);
cnt++;
}
assertEquals(1, cnt);
statement.close();
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
@ -865,7 +883,7 @@ public class LargeDataTest {
statement.execute(sql);
}
statement.execute("flush");
// statement.execute("flush");
// insert large amount of data time range : 13700 ~ 24000
for (int time = 13700; time < 24000; time++) {