Skip to content

Commit 890486a

Browse files
committed
Added limit to file size and request timeouts
1 parent 23eb500 commit 890486a

File tree

9 files changed

+151
-157
lines changed

9 files changed

+151
-157
lines changed

README.md

+7-13
Original file line numberDiff line numberDiff line change
@@ -6,22 +6,16 @@ The remote plugin is for forwarding metric data to a remote Kairos instance.
66
Metric data is gathered locally on the filesystem where it is compressed and uploaded to the
77
remote Kairos on specified intervals. (see kairos-remote.properties for options)
88

9-
## Remote Datastore
10-
The remote plugin can be loaded in one of two ways. The first is as the Kairos datastore:
11-
12-
```properties
13-
kairosdb.service.datastore=org.kairosdb.plugin.remote.RemoteModule
14-
```
15-
16-
This effectively makes the Kairos node write only. The node will not try to connect to
17-
Cassandra or load the H2 database.
18-
199
## Remote Listener
20-
The second way to load the remote plugin is as a data point listener:
10+
The remote plugin comes with a data points listener class and in order to load
11+
it you load the `ListenerModule` in your kairos configuration:
2112

2213
```properties
2314
kairosdb.datastore.remote.service.remote=org.kairosdb.plugin.remote.ListenerModule
2415
```
2516

26-
The `ListenerModule` adds a listener to the data point events going into the datastore and
27-
forwards the events on to a remote Kairos instance. Effectively letting you fork the data.
17+
The `ListenerModule` adds a listener to the data point events coming into kairos and
18+
forwards the events on to a remote Kairos instance. Effectively letting you fork the data.
19+
20+
For a pure remote Kairos instance you can comment out the datastore modules and just
21+
use the `ListenerModule`, effectively making the Kairos instance a write only node.

src/main/java/org/kairosdb/plugin/remote/ListenerModule.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ public class ListenerModule extends AbstractModule
1111
@Override
1212
protected void configure()
1313
{
14-
bind(RemoteDatastore.class).in(Scopes.SINGLETON);
14+
bind(RemoteListener.class).in(Scopes.SINGLETON);
1515
bind(RemoteSendJob.class).in(Scopes.SINGLETON);
1616
bind(RemoteDatastoreHealthCheck.class).in(Scopes.SINGLETON);
1717
bind(DiskUtils.class).to(DiskUtilsImpl.class);

src/main/java/org/kairosdb/plugin/remote/NullServiceKeyStore.java

-49
This file was deleted.

src/main/java/org/kairosdb/plugin/remote/RemoteHostImpl.java

+14-3
Original file line numberDiff line numberDiff line change
@@ -26,16 +26,27 @@
2626
public class RemoteHostImpl implements RemoteHost
2727
{
2828
private static final Logger logger = LoggerFactory.getLogger(RemoteHostImpl.class);
29-
private static final String REMOTE_URL_PROP = "kairosdb.datastore.remote.remote_url";
29+
private static final String REMOTE_URL_PROP = "kairosdb.remote.remote_url";
30+
private static final String CONNECTION_REQUEST_TIMEOUT = "kairosdb.remote.connection_request_timeout";
31+
private static final String CONNECTION_TIMEOUT = "kairosdb.remote.connection_timeout";
32+
private static final String SOCKET_TIMEOUT = "kairosdb.remote.socket_timeout";
3033

3134
private final String url;
3235
private CloseableHttpClient client;
3336

3437
@Inject
35-
public RemoteHostImpl(@Named(REMOTE_URL_PROP) String remoteUrl)
38+
public RemoteHostImpl(@Named(REMOTE_URL_PROP) String remoteUrl,
39+
@Named(CONNECTION_REQUEST_TIMEOUT) int requestTimeout,
40+
@Named(CONNECTION_TIMEOUT) int connectionTimeout,
41+
@Named(SOCKET_TIMEOUT) int socketTimeout)
3642
{
3743
this.url = checkNotNullOrEmpty(remoteUrl, "url must not be null or empty");
3844
client = HttpClients.createDefault();
45+
RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(socketTimeout)
46+
.setConnectionRequestTimeout(requestTimeout)
47+
.setConnectTimeout(connectionTimeout)
48+
.build();
49+
HttpClients.custom().setDefaultRequestConfig(requestConfig);
3950
}
4051

4152
@Override
@@ -67,7 +78,7 @@ else if (response.getStatusLine().getStatusCode() == 400)
6778
{
6879
//This means it was a bad file, more than likely the json is not well formed
6980
//renaming it will make sure we don't try it again as it will likely fail again
70-
//Most of the data likely was loaded into kairos
81+
//All of the data likely was loaded into kairos, especially if it was missing the last ]
7182
ByteArrayOutputStream body = new ByteArrayOutputStream();
7283
response.getEntity().writeTo(body);
7384
logger.error("Unable to send file " + zipFile + ": " + response.getStatusLine() +

src/main/java/org/kairosdb/plugin/remote/RemoteDatastore.java src/main/java/org/kairosdb/plugin/remote/RemoteListener.java

+90-41
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616

1717
package org.kairosdb.plugin.remote;
1818

19+
import com.google.common.base.Stopwatch;
1920
import com.google.common.collect.ArrayListMultimap;
2021
import com.google.common.collect.ImmutableSortedMap;
2122
import com.google.common.collect.Multimap;
@@ -39,49 +40,53 @@
3940
import java.nio.file.Files;
4041
import java.util.Arrays;
4142
import java.util.SortedMap;
43+
import java.util.concurrent.TimeUnit;
4244
import java.util.zip.GZIPOutputStream;
4345

4446
import static com.google.common.base.Preconditions.checkArgument;
4547
import static com.google.common.base.Preconditions.checkNotNull;
4648
import static com.google.common.base.Strings.isNullOrEmpty;
4749

4850

49-
public class RemoteDatastore
51+
public class RemoteListener
5052
{
51-
private static final Logger logger = LoggerFactory.getLogger(RemoteDatastore.class);
52-
private static final String DATA_DIR_PROP = "kairosdb.datastore.remote.data_dir";
53-
private static final String DROP_PERCENT_PROP = "kairosdb.datastore.remote.drop_on_used_disk_space_threshold_percent";
54-
private static final String METRIC_PREFIX_FILTER = "kairosdb.datastore.remote.prefix_filter";
55-
56-
private static final String FILE_SIZE_METRIC = "kairosdb.datastore.remote.file_size";
57-
private static final String ZIP_FILE_SIZE_METRIC = "kairosdb.datastore.remote.zip_file_size";
58-
private static final String WRITE_SIZE_METRIC = "kairosdb.datastore.remote.write_size";
59-
private static final String TIME_TO_SEND_METRIC = "kairosdb.datastore.remote.time_to_send";
60-
private static final String DELETE_ZIP_METRIC = "kairosdb.datastore.remote.deleted_zipFile_size";
53+
private static final Logger logger = LoggerFactory.getLogger(RemoteListener.class);
54+
private static final String DATA_DIR_PROP = "kairosdb.remote.data_dir";
55+
private static final String DROP_PERCENT_PROP = "kairosdb.remote.drop_on_used_disk_space_threshold_percent";
56+
private static final String METRIC_PREFIX_FILTER = "kairosdb.remote.prefix_filter";
57+
58+
private static final String FILE_SIZE_METRIC = "kairosdb.remote.file_size";
59+
private static final String ZIP_FILE_SIZE_METRIC = "kairosdb.remote.zip_file_size";
60+
private static final String WRITE_SIZE_METRIC = "kairosdb.remote.write_size";
61+
private static final String TIME_TO_SEND_METRIC = "kairosdb.remote.time_to_send";
62+
private static final String DELETE_ZIP_METRIC = "kairosdb.remote.deleted_zipFile_size";
63+
private static final String FLUSH_INTERVAL = "kairosdb.remote.flush_interval_ms";
64+
private static final String MAX_FILE_SIZE_MB = "kairosdb.remote.max_file_size_mb";
6165

6266
private final Object m_dataFileLock = new Object();
6367
private final Object m_sendLock = new Object();
6468
private final int m_dropPercent;
6569
private final File m_dataDirectory;
6670
private final RemoteHost m_remoteHost;
6771
private final DiskUtils m_diskUtils;
72+
private final int m_flushInterval;
73+
private final ImmutableSortedMap<String, String> m_tags;
6874
private BufferedWriter m_dataWriter;
6975
private final Publisher<DataPointEvent> m_publisher;
7076
private String m_dataFileName;
7177
private volatile boolean m_firstDataPoint = true;
7278
private int m_dataPointCounter;
79+
private Stopwatch m_sendTimer = Stopwatch.createUnstarted();
7380

7481
private volatile Multimap<DataPointKey, DataPoint> m_dataPointMultimap;
7582
private final Object m_mapLock = new Object(); //Lock for the above map
7683

7784
private volatile boolean m_running;
7885

79-
@Inject
80-
@Named("HOSTNAME")
81-
private String m_hostName = "localhost";
82-
8386
private String[] m_prefixFilterArray = new String[0];
8487

88+
private long m_maxFileSize = 1024*1024*10;
89+
8590
@Inject
8691
private LongDataPointFactory m_longDataPointFactory = new LongDataPointFactoryImpl();
8792

@@ -96,9 +101,13 @@ public void setPrefixFilter(@Named(METRIC_PREFIX_FILTER) String prefixFilter)
96101
}
97102

98103
@Inject
99-
public RemoteDatastore(@Named(DATA_DIR_PROP) String dataDir,
100-
@Named(DROP_PERCENT_PROP) String dropPercent, RemoteHost remoteHost,
101-
FilterEventBus eventBus, DiskUtils diskUtils) throws IOException, DatastoreException
104+
public RemoteListener(@Named(DATA_DIR_PROP) String dataDir,
105+
@Named(DROP_PERCENT_PROP) String dropPercent,
106+
@Named(FLUSH_INTERVAL) int flushInterval,
107+
@Named("HOSTNAME") String hostName,
108+
RemoteHost remoteHost,
109+
FilterEventBus eventBus,
110+
DiskUtils diskUtils) throws IOException, DatastoreException
102111
{
103112
m_dataDirectory = new File(dataDir);
104113
m_dropPercent = Integer.parseInt(dropPercent);
@@ -107,6 +116,11 @@ public RemoteDatastore(@Named(DATA_DIR_PROP) String dataDir,
107116
m_remoteHost = checkNotNull(remoteHost, "remote host must not be null");
108117
m_publisher = eventBus.createPublisher(DataPointEvent.class);
109118
m_diskUtils = checkNotNull(diskUtils, "diskUtils must not be null");
119+
m_flushInterval = flushInterval;
120+
121+
m_tags = ImmutableSortedMap.<String, String>naturalOrder()
122+
.put("host", hostName)
123+
.build();
110124

111125
createNewMap();
112126

@@ -123,7 +137,7 @@ public RemoteDatastore(@Named(DATA_DIR_PROP) String dataDir,
123137
{
124138
flushMap();
125139

126-
Thread.sleep(2000);
140+
Thread.sleep(m_flushInterval);
127141
}
128142
catch (Exception e)
129143
{
@@ -138,6 +152,12 @@ public RemoteDatastore(@Named(DATA_DIR_PROP) String dataDir,
138152

139153
}
140154

155+
@Inject
156+
public void setMaxFileSize(@Named(MAX_FILE_SIZE_MB)long maxFileSize)
157+
{
158+
m_maxFileSize = maxFileSize * 1024 * 1024;
159+
}
160+
141161
private Multimap<DataPointKey, DataPoint> createNewMap()
142162
{
143163
Multimap<DataPointKey, DataPoint> ret;
@@ -161,6 +181,9 @@ private void flushMap()
161181
{
162182
try
163183
{
184+
//Check if we need to roll to a new file because of size
185+
rollAndZipFile(System.currentTimeMillis(), true);
186+
164187
for (DataPointKey dataPointKey : flushMap.keySet())
165188
{
166189
//We have to clear the writer every time or it gets confused
@@ -359,7 +382,7 @@ private void cleanDiskSpace()
359382
Files.delete(zipFiles[0].toPath());
360383
logger.warn("Disk is too full to create zip file. Deleted older zip file " + zipFiles[0].getName() + " size: " + size);
361384
// For forwarding this metric will be reported both on the local kairos node and the remote
362-
m_publisher.post(new DataPointEvent(DELETE_ZIP_METRIC, ImmutableSortedMap.of("host", m_hostName),
385+
m_publisher.post(new DataPointEvent(DELETE_ZIP_METRIC, m_tags,
363386
m_longDataPointFactory.createDataPoint(System.currentTimeMillis(), size)));
364387
cleanDiskSpace(); // continue cleaning until space is freed up or all zip files are deleted.
365388
}
@@ -376,39 +399,65 @@ private boolean hasSpace()
376399
return m_dropPercent >= 100 || m_diskUtils.percentAvailable(m_dataDirectory) < m_dropPercent;
377400
}
378401

379-
void sendData() throws IOException
402+
//Rolls to a new file and zips up the current one
403+
private void rollAndZipFile(long now, boolean conditionalRoll) throws IOException
380404
{
381-
synchronized (m_sendLock)
382-
{
383-
String oldDataFile = m_dataFileName;
384-
long now = System.currentTimeMillis();
385-
386-
long fileSize = (new File(m_dataFileName)).length();
405+
int dataPointCounter;
406+
String oldDataFile;
407+
long fileSize;
387408

388-
ImmutableSortedMap<String, String> tags = ImmutableSortedMap.<String, String>naturalOrder()
389-
.put("host", m_hostName)
390-
.build();
409+
synchronized (m_dataFileLock)
410+
{
411+
fileSize = (new File(m_dataFileName)).length();
391412

392-
int dataPointCounter;
393-
synchronized (m_dataFileLock)
413+
if (conditionalRoll)
394414
{
395-
closeDataFile();
396-
dataPointCounter = m_dataPointCounter;
397-
openDataFile();
415+
//Check file size
416+
if (fileSize < m_maxFileSize)
417+
return;
398418
}
399419

400-
long zipSize = zipFile(oldDataFile);
420+
oldDataFile = m_dataFileName;
421+
422+
closeDataFile();
423+
//m_dataPointCounter gets reset in openDataFile()
424+
dataPointCounter = m_dataPointCounter;
425+
openDataFile();
426+
}
427+
428+
long zipSize = zipFile(oldDataFile);
429+
430+
try
431+
{
432+
putDataPoint(new DataPointEvent(FILE_SIZE_METRIC, m_tags, m_longDataPointFactory.createDataPoint(now, fileSize), 0));
433+
putDataPoint(new DataPointEvent(WRITE_SIZE_METRIC, m_tags, m_longDataPointFactory.createDataPoint(now, dataPointCounter), 0));
434+
putDataPoint(new DataPointEvent(ZIP_FILE_SIZE_METRIC, m_tags, m_longDataPointFactory.createDataPoint(now, zipSize), 0));
435+
}
436+
catch (DatastoreException e)
437+
{
438+
logger.error("Error writing remote metrics", e);
439+
}
440+
}
441+
442+
//Called by RemoteSendJob that is on a timer set in config
443+
void sendData() throws IOException
444+
{
445+
synchronized (m_sendLock)
446+
{
447+
448+
long now = System.currentTimeMillis();
449+
m_sendTimer.start();
450+
451+
rollAndZipFile(now, false);
401452

402453
sendAllZipfiles();
403454

404-
long timeToSend = System.currentTimeMillis() - now;
455+
long timeToSend = m_sendTimer.elapsed(TimeUnit.MILLISECONDS);
456+
m_sendTimer.reset();
405457

406458
try
407459
{
408-
putDataPoint(new DataPointEvent(FILE_SIZE_METRIC, tags, m_longDataPointFactory.createDataPoint(now, fileSize), 0));
409-
putDataPoint(new DataPointEvent(WRITE_SIZE_METRIC, tags, m_longDataPointFactory.createDataPoint(now, dataPointCounter), 0));
410-
putDataPoint(new DataPointEvent(ZIP_FILE_SIZE_METRIC, tags, m_longDataPointFactory.createDataPoint(now, zipSize), 0));
411-
putDataPoint(new DataPointEvent(TIME_TO_SEND_METRIC, tags, m_longDataPointFactory.createDataPoint(now, timeToSend), 0));
460+
putDataPoint(new DataPointEvent(TIME_TO_SEND_METRIC, m_tags, m_longDataPointFactory.createDataPoint(now, timeToSend), 0));
412461
}
413462
catch (DatastoreException e)
414463
{

src/main/java/org/kairosdb/plugin/remote/RemoteModule.java

-28
This file was deleted.

0 commit comments

Comments
 (0)