0

Amazon Web Services のサーバーにマウントされた Couchbase インスタンスと、同じサーバーで実行されている Elasticsearch インスタンスがあります。

それらの 2 つの間の接続は正常に確立され、現在までレプリケーションも正常に動作していました... ところが突然、Elasticsearch で次のエラーログが出力されるようになりました。

[2013-08-29 21:27:34,947][WARN ][cluster.metadata         ] [01-Thor] failed to dynamically update the mapping in cluster_state from shard
 java.lang.OutOfMemoryError: Java heap space
    at org.apache.lucene.util.ArrayUtil.grow(ArrayUtil.java:343)
    at org.elasticsearch.common.io.FastByteArrayOutputStream.write(FastByteArrayOutputStream.java:103)
    at org.elasticsearch.common.jackson.core.json.UTF8JsonGenerator._flushBuffer(UTF8JsonGenerator.java:1848)
    at org.elasticsearch.common.jackson.core.json.UTF8JsonGenerator.writeString(UTF8JsonGenerator.java:436)
    at org.elasticsearch.common.xcontent.json.JsonXContentGenerator.writeString(JsonXContentGenerator.java:84)
    at org.elasticsearch.common.xcontent.XContentBuilder.field(XContentBuilder.java:314)
    at org.elasticsearch.index.mapper.core.AbstractFieldMapper.doXContentBody(AbstractFieldMapper.java:601)
    at org.elasticsearch.index.mapper.core.NumberFieldMapper.doXContentBody(NumberFieldMapper.java:286)
    at org.elasticsearch.index.mapper.core.LongFieldMapper.doXContentBody(LongFieldMapper.java:338)
    at org.elasticsearch.index.mapper.core.AbstractFieldMapper.toXContent(AbstractFieldMapper.java:595)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852)
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920)
    at org.elasticsearch.index.mapper.DocumentMapper.toXContent(DocumentMapper.java:700)
    at org.elasticsearch.index.mapper.DocumentMapper.refreshSource(DocumentMapper.java:682)
    at org.elasticsearch.index.mapper.DocumentMapper.<init>(DocumentMapper.java:342)
    at org.elasticsearch.index.mapper.DocumentMapper$Builder.build(DocumentMapper.java:224)
    at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:231)
    at org.elasticsearch.index.mapper.MapperService.parse(MapperService.java:380)
    at org.elasticsearch.index.mapper.MapperService.merge(MapperService.java:190)
    at org.elasticsearch.cluster.metadata.MetaDataMappingService$2.execute(MetaDataMappingService.java:185)
    at org.elasticsearch.cluster.service.InternalClusterService$2.run(InternalClusterService.java:229)
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:95)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
    at java.lang.Thread.run(Unknown Source)
[2013-08-29 21:27:56,948][WARN ][indices.ttl              ] [01-Thor] failed to execute ttl purge
 java.lang.OutOfMemoryError: Java heap space
    at org.apache.lucene.util.ByteBlockPool$Allocator.getByteBlock(ByteBlockPool.java:66)
    at org.apache.lucene.util.ByteBlockPool.nextBuffer(ByteBlockPool.java:202)
    at org.apache.lucene.util.BytesRefHash.add(BytesRefHash.java:319)
    at org.apache.lucene.util.BytesRefHash.add(BytesRefHash.java:274)
    at org.apache.lucene.search.ConstantScoreAutoRewrite$CutOffTermCollector.collect(ConstantScoreAutoRewrite.java:131)
    at org.apache.lucene.search.TermCollectingRewrite.collectTerms(TermCollectingRewrite.java:79)
    at org.apache.lucene.search.ConstantScoreAutoRewrite.rewrite(ConstantScoreAutoRewrite.java:95)
    at org.apache.lucene.search.MultiTermQuery$ConstantScoreAutoRewrite.rewrite(MultiTermQuery.java:220)
    at org.apache.lucene.search.MultiTermQuery.rewrite(MultiTermQuery.java:288)
    at org.apache.lucene.search.IndexSearcher.rewrite(IndexSearcher.java:639)
    at org.apache.lucene.search.IndexSearcher.createNormalizedWeight(IndexSearcher.java:686)
    at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:309)
    at org.elasticsearch.indices.ttl.IndicesTTLService.purgeShards(IndicesTTLService.java:186)
    at org.elasticsearch.indices.ttl.IndicesTTLService.access$000(IndicesTTLService.java:65)
    at org.elasticsearch.indices.ttl.IndicesTTLService$PurgerThread.run(IndicesTTLService.java:122)

 [2013-08-29 21:29:23,919][WARN ][indices.ttl              ] [01-Thor] failed to execute ttl purge
 java.lang.OutOfMemoryError: Java heap space

いくつかのメモリ設定値を変更してみましたが、うまくいかないようです。

誰かが同じ問題を経験しましたか?

4

1 に答える 1

1

いくつかのトラブルシューティングのヒント:

  1. 通常、予測可能なパフォーマンスとデバッグの容易さのために、1 つの AWS インスタンスを Elasticsearch 専用にするのが賢明です。

  2. Bigdeskプラグインを使用してメモリ使用量を監視します。これは、メモリのボトルネックが Elasticsearch から発生しているかどうかを示します。OS、同時の大量のクエリとインデックス作成、または予期しない何かが原因である可能性があります。

  3. Elasticsearch の Java ヒープは、ボックスの合計メモリの約 50% に設定する必要があります。

  4. Shay Banon によるこのGistは、Elasticsearch でのメモリの問題を解決するためのいくつかのソリューションを提供します。

于 2013-09-10T05:55:28.413 に答える