mirror of https://github.com/apache/iotdb
fix sonars and add memory estimation method
parent 2678988231
commit f3f4099a32
@@ -0,0 +1,48 @@
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

if [ -z "${IOTDB_HOME}" ]; then
  export IOTDB_HOME="$(cd "`dirname "$0"`"/..; pwd)"
fi

MAIN_CLASS=org.apache.iotdb.cluster.service.nodetool.NodeTool

CLASSPATH=""
for f in ${IOTDB_HOME}/lib_cluster/*.jar; do
  CLASSPATH=${CLASSPATH}":"$f
done

if [ -n "$JAVA_HOME" ]; then
  for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
    if [ -x "$java" ]; then
      JAVA="$java"
      break
    fi
  done
else
  JAVA=java
fi

exec "$JAVA" -cp "$CLASSPATH" "$MAIN_CLASS" "$@"

exit $?
@@ -0,0 +1,58 @@
@REM
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements.  See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership.  The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License.  You may obtain a copy of the License at
@REM
@REM     http://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied.  See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM

if "%OS%" == "Windows_NT" setlocal

pushd %~dp0..
if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD%
popd

if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.service.nodetool.NodeTool
if NOT DEFINED JAVA_HOME goto :err

@REM -----------------------------------------------------------------------------
@REM JVM Opts we'll use in legacy run or installation
set JAVA_OPTS=-ea^
 -DIOTDB_HOME=%IOTDB_HOME%

REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable.
for %%i in ("%IOTDB_HOME%\lib_cluster\*.jar") do call :append "%%i"
goto okClasspath

:append
set CLASSPATH=%CLASSPATH%;%1
goto :eof

REM -----------------------------------------------------------------------------
:okClasspath

"%JAVA_HOME%\bin\java" %JAVA_OPTS% -cp "%CLASSPATH%" %MAIN_CLASS% %*

goto finally

:err
echo JAVA_HOME environment variable must be set!
pause

@REM -----------------------------------------------------------------------------
:finally

ENDLOCAL
@@ -26,7 +26,6 @@ import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
@@ -211,7 +211,7 @@ public class IoTDBConfig {
   /**
    * The memory used for stat performance.
    */
-  private int performance_stat_memory_in_kb = 20;
+  private int performanceStatMemoryInKB = 20;
   /**
    * whether use chunkBufferPool.
    */
@@ -565,12 +565,12 @@ public class IoTDBConfig {
     this.performanceStatDisplayInterval = performanceStatDisplayInterval;
   }
 
-  public int getPerformance_stat_memory_in_kb() {
-    return performance_stat_memory_in_kb;
+  public int getPerformanceStatMemoryInKB() {
+    return performanceStatMemoryInKB;
   }
 
-  public void setPerformance_stat_memory_in_kb(int performance_stat_memory_in_kb) {
-    this.performance_stat_memory_in_kb = performance_stat_memory_in_kb;
+  public void setPerformanceStatMemoryInKB(int performanceStatMemoryInKB) {
+    this.performanceStatMemoryInKB = performanceStatMemoryInKB;
   }
 
   public long getMemtableSizeThreshold() {
@@ -226,9 +226,9 @@ public class IoTDBDescriptor {
       conf.setPerformanceStatDisplayInterval(Long
           .parseLong(properties.getProperty("performance_stat_display_interval",
               Long.toString(conf.getPerformanceStatDisplayInterval())).trim()));
-      conf.setPerformance_stat_memory_in_kb(Integer
+      conf.setPerformanceStatMemoryInKB(Integer
           .parseInt(properties.getProperty("performance_stat_memory_in_kb",
-              Integer.toString(conf.getPerformance_stat_memory_in_kb())).trim()));
+              Integer.toString(conf.getPerformanceStatMemoryInKB())).trim()));
     } catch (IOException e) {
       logger.warn("Cannot load config file because, use default configuration", e);
     } catch (Exception e) {
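For reference, only the accessor names change here; the properties key stays performance_stat_memory_in_kb with its default of 20 KB. A minimal sketch of reading the value through the renamed getter (the surrounding setup is assumed, not part of the diff):

    IoTDBConfig conf = IoTDBDescriptor.getInstance().getConfig();
    // falls back to 20 when performance_stat_memory_in_kb is absent from the properties file
    int statMemoryInKb = conf.getPerformanceStatMemoryInKB();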
@@ -20,7 +20,6 @@ package org.apache.iotdb.db.conf.adapter;

import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.exception.ConfigAdjusterException;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.rescon.PrimitiveArrayPool;
@@ -37,10 +36,11 @@ import org.slf4j.LoggerFactory;
  *
  * 1. maxMemTableNum. This parameter represents the size of the MemTable available in the MemTable
  * pool, which is closely related to the number of storage groups. When adding or deleting a storage
- * group, the parameter also adds or deletes two MemTables. The reason why adding or deleting two
+ * group, the parameter also adds or deletes four MemTables. The reason why adding or deleting four
  * MemTables is that when the system is running stably, the speed of the flush operation is faster
  * than that of data writing, so one is used for the Flush process and the other is used for data
- * writing. Otherwise, the system should limit the speed of data writing to maintain stability.
+ * writing. Otherwise, the system should limit the speed of data writing to maintain stability. And
+ * two for sequence data, two for unsequence data.
  *
  * 2. memtableSize. This parameter determines the threshold value for the MemTable in memory to be
  * flushed into disk. When the system load increases, the parameter should be set smaller so that
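A quick illustration of the bookkeeping described in the Javadoc above; the storage-group count is hypothetical, and only the four-MemTables-per-group rule comes from the comment:

    // two MemTables for sequence data plus two for unsequence data per storage group,
    // so one can be flushing while the other keeps accepting writes
    int storageGroupNum = 10;                      // hypothetical
    int memTablesForGroups = 4 * storageGroupNum;  // 40 MemTables reserved in the pool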
@@ -75,16 +75,10 @@ public class IoTDBConfigDynamicAdapter implements IDynamicAdapter {
 
   // static parameter section
 
-  /**
-   * When the size of the adjusted MemTable decreases more than this parameter, trigger the global
-   * flush operation and flush all MemTable that meets the flush condition to disk.
-   */
-  private static final double FLUSH_THRESHOLD = 0.2;
-
   /**
    * Maximum amount of memory allocated for write process.
    */
-  private static final long ALLOCATE_MEMORY_FOR_WRITE = CONFIG.getAllocateMemoryForWrite();
+  private static long allocateMemoryForWrite = CONFIG.getAllocateMemoryForWrite();
 
   /**
    * Metadata size of per timeseries, the default value is 2KB.
@@ -168,7 +162,7 @@ public class IoTDBConfigDynamicAdapter implements IDynamicAdapter {
     // when unit is byte, it's likely to cause Long type overflow.
     // so when b is larger than Integer.MAC_VALUE use the unit KB.
     double a = ratio * maxMemTableNum;
-    double b = (ALLOCATE_MEMORY_FOR_WRITE - staticMemory) * ratio;
+    double b = (allocateMemoryForWrite - staticMemory) * ratio;
     int magnification = b > Integer.MAX_VALUE ? 1024 : 1;
     b /= magnification;
     double c = (double) CONFIG.getTsFileSizeThreshold() * maxMemTableNum * CHUNK_METADATA_SIZE_IN_BYTE
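The two comment lines above explain the switch of units: expressed in bytes, the coefficient b can exceed Integer.MAX_VALUE, so the calculation falls back to KB. A minimal sketch with assumed numbers (the 8 GiB budget and 0.9 ratio are illustrative, not values from the diff):

    long allocateMemoryForWrite = 8L * 1024 * 1024 * 1024;      // assumed 8 GiB write budget
    long staticMemory = 0;                                       // assumed
    double ratio = 0.9;                                          // assumed ratio
    double b = (allocateMemoryForWrite - staticMemory) * ratio;  // ~7.7e9, above Integer.MAX_VALUE (~2.1e9)
    int magnification = b > Integer.MAX_VALUE ? 1024 : 1;        // so the rest of the math runs in KB
    b /= magnification;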
@@ -187,7 +181,7 @@ public class IoTDBConfigDynamicAdapter implements IDynamicAdapter {
    * @return Tsfile byte threshold
    */
   private long calcTsFileSize(long memTableSize) {
-    return (long) ((ALLOCATE_MEMORY_FOR_WRITE - maxMemTableNum * memTableSize - staticMemory) * CompressionRatio
+    return (long) ((allocateMemoryForWrite - maxMemTableNum * memTableSize - staticMemory) * CompressionRatio
         .getInstance().getRatio()
         * memTableSize / (maxMemTableNum * CHUNK_METADATA_SIZE_IN_BYTE * MManager.getInstance()
         .getMaximalSeriesNumberAmongStorageGroups()));
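Restating calcTsFileSize with hypothetical inputs makes the shape of the estimate easier to see; every number below is an assumption for illustration, and only the formula itself comes from the method:

    long allocateMemoryForWrite = 8L * 1024 * 1024 * 1024;  // assumed 8 GiB write budget
    int maxMemTableNum = 40;                                 // assumed MemTable pool size
    long memTableSize = 64L * 1024 * 1024;                   // assumed 64 MiB per MemTable
    long staticMemory = 0;                                   // assumed
    double compressionRatio = 5.0;                           // assumed CompressionRatio value
    long chunkMetadataSize = 1536;                           // assumed CHUNK_METADATA_SIZE_IN_BYTE
    long maxSeriesNumber = 10_000;                           // assumed maximal series number
    long tsFileSize = (long) ((allocateMemoryForWrite - maxMemTableNum * memTableSize - staticMemory)
        * compressionRatio * memTableSize
        / (maxMemTableNum * chunkMetadataSize * (double) maxSeriesNumber));
    // with these inputs the threshold lands around 3 GB; the series count and chunk metadata
    // size in the denominator are what pull it down as the schema grows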
@@ -253,6 +247,7 @@ public class IoTDBConfigDynamicAdapter implements IDynamicAdapter {
     totalTimeseries = 0;
     staticMemory = 0;
     maxMemTableNum = MEM_TABLE_AVERAGE_QUEUE_LEN;
+    allocateMemoryForWrite = CONFIG.getAllocateMemoryForWrite();
     initialized = false;
   }
 
@@ -120,7 +120,7 @@ public class Measurement implements MeasurementMBean, IService {
     IoTDBConfig tdbConfig = IoTDBDescriptor.getInstance().getConfig();
     isEnableStat = tdbConfig.isEnablePerformanceStat();
     displayIntervalInMs = tdbConfig.getPerformanceStatDisplayInterval();
-    int memoryInKb = tdbConfig.getPerformance_stat_memory_in_kb();
+    int memoryInKb = tdbConfig.getPerformanceStatMemoryInKB();
 
     queueSize = memoryInKb * 1000 / Operation.values().length / 8;
     operationLatenciesQueue = new ConcurrentCircularArray[Operation.values().length];
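The queueSize line is easy to check by hand: each recorded latency is an 8-byte long, so with the default of 20 KB and, purely for illustration, four values in the Operation enum, every circular array gets 20 * 1000 / 4 / 8 = 625 slots; the real count depends on how many operations the enum actually defines.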
@@ -180,54 +180,6 @@ public abstract class AbstractMemTable implements IMemTable {
     this.modifications.add(deletion);
   }
 
-  /**
-   * If chunk contains data with timestamp less than 'timestamp', create a copy and delete all those
-   * data. Otherwise return null.
-   *
-   * @param chunk the source chunk.
-   * @param timestamp the upper-bound of deletion time.
-   * @return A reduced copy of chunk if chunk contains data with timestamp less than 'timestamp', of
-   *         null.
-   */
-  private IWritableMemChunk filterChunk(IWritableMemChunk chunk, long timestamp) {
-
-    if (!chunk.isEmpty() && chunk.getMinTime() <= timestamp) {
-      //TODO we can avoid sorting data here by scanning data once.
-      List<TimeValuePair> timeValuePairs = chunk.getSortedTimeValuePairList();
-      TSDataType dataType = chunk.getType();
-      IWritableMemChunk newChunk = genMemSeries(dataType);
-      for (TimeValuePair pair : timeValuePairs) {
-        if (pair.getTimestamp() > timestamp) {
-          switch (dataType) {
-            case BOOLEAN:
-              newChunk.putBoolean(pair.getTimestamp(), pair.getValue().getBoolean());
-              break;
-            case DOUBLE:
-              newChunk.putDouble(pair.getTimestamp(), pair.getValue().getDouble());
-              break;
-            case INT64:
-              newChunk.putLong(pair.getTimestamp(), pair.getValue().getLong());
-              break;
-            case INT32:
-              newChunk.putInt(pair.getTimestamp(), pair.getValue().getInt());
-              break;
-            case FLOAT:
-              newChunk.putFloat(pair.getTimestamp(), pair.getValue().getFloat());
-              break;
-            case TEXT:
-              newChunk.putBinary(pair.getTimestamp(), pair.getValue().getBinary());
-              break;
-            default:
-              throw new UnsupportedOperationException("Unknown datatype: " + dataType);
-          }
-        }
-      }
-      TVListAllocator.getInstance().release(dataType, chunk.getTVList());
-      return newChunk;
-    }
-    return null;
-  }
-
   public void setVersion(long version) {
     this.version = version;
   }
@@ -25,14 +25,11 @@ import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.engine.memtable.IMemTable;
import org.apache.iotdb.db.engine.memtable.MemTableFlushTask;
import org.apache.iotdb.db.engine.memtable.PrimitiveMemTable;
import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import org.apache.iotdb.db.engine.version.VersionController;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
@@ -131,6 +131,7 @@ public class IoTDBConfigDynamicAdapterTest {
         MManager.getInstance().setMaxSeriesNumberAmongStorageGroup(i / 30 + 1);
       }
     } catch (ConfigAdjusterException e) {
+      System.out.println(i);
       assertEquals("The IoTDB system load is too large to add timeseries.", e.getMessage());
     }
     int j = 0;
@@ -145,4 +146,44 @@ public class IoTDBConfigDynamicAdapterTest {
       assertEquals("The IoTDB system load is too large to add timeseries.", e.getMessage());
     }
   }
+
+  @Test
+  public void addOrDeleteTimeSeriesSyso2() {
+    int sgNum = 1;
+    IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
+    long memTableSize = config.getMemtableSizeThreshold();
+    int maxMemtableNumber = config.getMaxMemtableNumber();
+    long tsFileSize = config.getTsFileSizeThreshold();
+    long memory = 1024 * 1024 * 1024L;
+    while (true) {
+      config.setAllocateMemoryForWrite(memory);
+      config.setMemtableSizeThreshold(memTableSize);
+      config.setMaxMemtableNumber(maxMemtableNumber);
+      config.setTsFileSizeThreshold(tsFileSize);
+      IoTDBConfigDynamicAdapter.getInstance().reset();
+      IoTDBConfigDynamicAdapter.getInstance().setInitialized(true);
+      MManager.getInstance().clear();
+      for (int i = 1; i <= 50; i++) {
+        try {
+          IoTDBConfigDynamicAdapter.getInstance().addOrDeleteStorageGroup(sgNum);
+        } catch (ConfigAdjusterException e) {
+          e.printStackTrace();
+        }
+      }
+      int i = 1;
+      try {
+        for (; i <= 10000000; i++) {
+          IoTDBConfigDynamicAdapter.getInstance().addOrDeleteTimeSeries(1);
+          MManager.getInstance().setMaxSeriesNumberAmongStorageGroup(i / 50 + 1);
+        }
+      } catch (ConfigAdjusterException e) {
+        // System.out.println(i);
+        memory += 1024 * 1024 * 1024L;
+        // System.out.println("Memory for writing: " + memory / 1024 / 1024 / 1024 + "GB");
+        continue;
+      }
+      break;
+    }
+    System.out.println("Memory for writing: " + memory / 1024 / 1024 / 1024 + "GB");
+  }
 }
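This added test appears to be the memory estimation method mentioned in the commit title: it repeatedly resets the dynamic adapter, registers 50 storage groups and up to 10,000,000 timeseries, grows the write memory budget by 1 GB whenever a ConfigAdjusterException signals that the load no longer fits, and finally prints the smallest budget (in GB) that accommodated the workload.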