
My application runs a sampling query periodically, and it was working fine for around 30 hours. Then it suddenly started failing with the following error.

The Java client is unable to access the database and gets the following exception:

Caused by: com.toshiba.mwcloud.gs.common.GSConnectionException: [145028:JC_BAD_CONNECTION] Failed to update by notification (address=/239.0.0.1:31999, reason=Receive timed out)
    at com.toshiba.mwcloud.gs.subnet.NodeResolver.updateMasterInfo(NodeResolver.java:815)
    at com.toshiba.mwcloud.gs.subnet.NodeResolver.prepareConnectionAndClusterInfo(NodeResolver.java:522)
    at com.toshiba.mwcloud.gs.subnet.NodeResolver.getPartitionCount(NodeResolver.java:205)
    at com.toshiba.mwcloud.gs.subnet.GridStoreChannel$5.execute(GridStoreChannel.java:2106)
    at com.toshiba.mwcloud.gs.subnet.GridStoreChannel.executeStatement(GridStoreChannel.java:1675)
    ... 38 more
Caused by: java.net.SocketTimeoutException: Receive timed out

Why is this happening? What is the cause?
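
For reference, the client connects over multicast notification roughly as below (a minimal sketch: the address and port are taken from the error message, the cluster name and credentials from the gs_stat output further down; everything else is a placeholder):

import java.util.Properties;

import com.toshiba.mwcloud.gs.GridStore;
import com.toshiba.mwcloud.gs.GridStoreFactory;

public class ConnectionSketch {
    public static void main(String[] args) throws Exception {
        // MULTICAST notification mode: the client listens for the cluster's
        // multicast announcement to discover the current master node.
        Properties props = new Properties();
        props.setProperty("notificationAddress", "239.0.0.1"); // from the error message
        props.setProperty("notificationPort", "31999");        // from the error message
        props.setProperty("clusterName", "defaultCluster");    // from gs_stat below
        props.setProperty("user", "admin");
        props.setProperty("password", "admin");

        // Resolving the master via the notification above is the step that
        // times out in the stack trace (NodeResolver.updateMasterInfo).
        GridStore store = GridStoreFactory.getInstance().getGridStore(props);
        try {
            // ... run the periodic sampling query here ...
        } finally {
            store.close();
        }
    }
}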

This is the output of gs_stat -u admin/admin:

{
  "checkpoint": {
    "archiveLog": 0,
    "backupOperation": 0,
    "duplicateLog": 0,
    "endTime": 1580053987745,
    "mode": "NORMAL_CHECKPOINT",
    "normalCheckpointOperation": 1470,
    "pendingPartition": 0,
    "periodicCheckpoint": "ACTIVE",
    "requestedCheckpointOperation": 0,
    "startTime": 1580053987741
  },
  "cluster": {
    "activeCount": 0,
    "autoGoal": "ACTIVE",
    "clusterName": "defaultCluster",
    "clusterRevisionId": "4e9be62e-7911-48a4-8d93-19af09be7a15",
    "clusterRevisionNo": 17651,
    "clusterStatus": "SUB_CLUSTER",
    "designatedCount": 1,
    "loadBalancer": "ACTIVE",
    "nodeList": [
      {
        "address": "10.128.0.2",
        "port": 10040
      }
    ],
    "nodeStatus": "ABNORMAL",
    "notificationMode": "MULTICAST",
    "partitionStatus": "INITIAL",
    "startupTime": "2020-01-25T15:20:31.377Z",
    "syncCount": 0
  },
  "currentTime": "2020-01-26T17:20:39Z",
  "performance": {
    "backupCount": 0,
    "batchFree": 0,
    "bufferHashCollisionCount": 0,
    "checkpointFileAllocateSize": 5443584,
    "checkpointFileFlushCount": 0,
    "checkpointFileFlushTime": 0,
    "checkpointFileSize": 5439488,
    "checkpointFileUsageRate": 0.927710843373494,
    "checkpointMemory": 196608,
    "checkpointMemoryLimit": 1073741824,
    "checkpointWriteSize": 270139392,
    "checkpointWriteTime": 214,
    "currentCheckpointWriteBufferSize": 0,
    "currentTime": 1580059239771,
    "expirationDetail": {
      "autoExpire": false,
      "erasableExpiredTime": "1970-01-01T00:00:00.000Z",
      "latestExpirationCheckTime": "1970-01-01T00:00:00.000Z"
    },
    "logFileFlushCount": 8832,
    "logFileFlushTime": 38224,
    "numBackground": 0,
    "numConnection": 2,
    "numNoExpireTxn": 0,
    "numSession": 0,
    "numTxn": 0,
    "ownerCount": 128,
    "peakProcessMemory": 86626304,
    "processMemory": 86626304,
    "recoveryReadSize": 262144,
    "recoveryReadTime": 0,
    "recoveryReadUncompressTime": 0,
    "storeCompressionMode": "NO_BLOCK_COMPRESSION",
    "storeDetail": {
      "batchFreeMapData": {
        "storeMemory": 0,
        "storeUse": 0,
        "swapRead": 0,
        "swapWrite": 0
      },
      "batchFreeRowData": {
        "storeMemory": 0,
        "storeUse": 0,
        "swapRead": 0,
        "swapWrite": 0
      },
      "mapData": {
        "storeMemory": 131072,
        "storeUse": 131072,
        "swapRead": 0,
        "swapWrite": 0
      },
      "metaData": {
        "storeMemory": 131072,
        "storeUse": 131072,
        "swapRead": 0,
        "swapWrite": 0
      },
      "rowData": {
        "storeMemory": 4784128,
        "storeUse": 4784128,
        "swapRead": 0,
        "swapWrite": 0
      }
    },
    "storeMemory": 5046272,
    "storeMemoryLimit": 1073741824,
    "storeTotalUse": 5046272,
    "swapRead": 0,
    "swapReadSize": 0,
    "swapReadTime": 0,
    "swapReadUncompressTime": 0,
    "swapWrite": 0,
    "swapWriteCompressTime": 0,
    "swapWriteSize": 0,
    "swapWriteTime": 0,
    "syncReadSize": 0,
    "syncReadTime": 0,
    "syncReadUncompressTime": 0,
    "totalBackupLsn": 0,
    "totalLockConflictCount": 0,
    "totalOtherLsn": 0,
    "totalOwnerLsn": 110220,
    "totalReadOperation": 4733,
    "totalRowRead": 2325894,
    "totalRowWrite": 55108,
    "totalWriteOperation": 55108,
    "txnDetail": {
      "totalBackgroundOperation": 0
    }
  },
  "recovery": {
    "progressRate": 1
  },
  "version": "4.3.0-36424 CE"
}

1 Answer


It seems that this is being dealt with separately on GitHub; please refer to this issue: https://github.com/griddb/griddb_nosql/issues/235. Note that your gs_stat output shows "nodeStatus": "ABNORMAL" and "clusterStatus": "SUB_CLUSTER", i.e. the node has dropped out of the cluster, which is why the client times out waiting for the master-update notification.
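
Not from the linked issue, but as a practical stopgap on the client side: the failure surfaces as a GSException, so the periodic job can log the error code and retry instead of dying. A minimal sketch (the loop structure and retry interval are my assumptions, not anything GridDB prescribes):

import java.util.Properties;

import com.toshiba.mwcloud.gs.GSException;
import com.toshiba.mwcloud.gs.GridStore;
import com.toshiba.mwcloud.gs.GridStoreFactory;

public class SamplingWithRetry {
    public static void main(String[] args) throws InterruptedException {
        while (true) {
            try (GridStore store =
                    GridStoreFactory.getInstance().getGridStore(connectProps())) {
                // ... run the sampling query here ...
            } catch (GSException e) {
                // e.getErrorCode() is 145028 (JC_BAD_CONNECTION) for the
                // failure shown in the question's stack trace.
                System.err.println("GridDB error " + e.getErrorCode()
                        + ": " + e.getMessage());
            }
            Thread.sleep(60_000); // wait for the next sampling period
        }
    }

    private static Properties connectProps() {
        // Same multicast properties as in the question.
        Properties props = new Properties();
        props.setProperty("notificationAddress", "239.0.0.1");
        props.setProperty("notificationPort", "31999");
        props.setProperty("clusterName", "defaultCluster");
        props.setProperty("user", "admin");
        props.setProperty("password", "admin");
        return props;
    }
}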
