flume基本配置详情

原创
2015/07/06 17:24
阅读数 169

############################################

#  producer config

############################################

#agent section

producer.sources = s

producer.channels = c c1 c2

producer.sinks = r h es


#source section

producer.sources.s.type = exec

producer.sources.s.command = tail -f /usr/local/nginx/logs/test1.log

#producer.sources.s.type = spooldir

#producer.sources.s.spoolDir = /usr/local/nginx/logs/

#producer.sources.s.fileHeader = true


producer.sources.s.channels = c c1 c2


producer.sources.s.interceptors = i

#不支持忽略大小写

producer.sources.s.interceptors.i.regex = .*\.(css|js|jpg|jpeg|png|gif|ico).*

producer.sources.s.interceptors.i.type = org.apache.flume.interceptor.RegexFilteringInterceptor$Builder

#不包含

producer.sources.s.interceptors.i.excludeEvents = true


############################################

#   avro sink config

############################################

producer.channels.c.type = memory

#Timeout in seconds for adding or removing an event

producer.channels.c.keep-alive= 30

producer.channels.c.capacity = 10000

producer.channels.c.transactionCapacity = 10000

producer.channels.c.byteCapacityBufferPercentage = 20

producer.channels.c.byteCapacity = 800000


producer.sinks.r.channel = c


producer.sinks.r.type = avro

producer.sinks.r.hostname  = 127.0.0.1

producer.sinks.r.port = 10101

############################################

#   hdfs config

############################################

producer.channels.c1.type = memory

#Timeout in seconds for adding or removing an event

producer.channels.c1.keep-alive= 30

producer.channels.c1.capacity = 10000

producer.channels.c1.transactionCapacity = 10000

producer.channels.c1.byteCapacityBufferPercentage = 20

producer.channels.c1.byteCapacity = 800000


producer.sinks.h.channel = c1


producer.sinks.h.type = hdfs

#目录位置

producer.sinks.h.hdfs.path = hdfs://127.0.0.1/tmp/flume/%Y/%m/%d

#文件前缀

producer.sinks.h.hdfs.filePrefix=nginx-%Y-%m-%d-%H

producer.sinks.h.hdfs.fileType = DataStream

#时间类型必加,不然会报错

producer.sinks.h.hdfs.useLocalTimeStamp = true

producer.sinks.h.hdfs.writeFormat = Text

#hdfs创建多长时间新建文件,0不基于时间

#Number of seconds to wait before rolling current file (0 = never roll based on time interval)

producer.sinks.h.hdfs.rollInterval=0

#hdfs多大时新建文件,0不基于文件大小

#File size to trigger roll, in bytes (0: never roll based on file size)

producer.sinks.h.hdfs.rollSize = 0

#hdfs有多少条消息时新建文件,0不基于消息个数

#Number of events written to file before it rolled (0 = never roll based on number of events)

producer.sinks.h.hdfs.rollCount = 0

#批量写入hdfs的个数

#number of events written to file before it is flushed to HDFS

producer.sinks.h.hdfs.batchSize=1000

#flume操作hdfs的线程数(包括新建,写入等)

#Number of threads per HDFS sink for HDFS IO ops (open, write, etc.)

producer.sinks.h.hdfs.threadsPoolSize=15

#操作hdfs超时时间

#Number of milliseconds allowed for HDFS operations, such as open, write, flush, close. This number should be increased if many HDFS timeout operations are occurring.

producer.sinks.h.hdfs.callTimeout=30000




#hdfs.round (default: false)
#Should the timestamp be rounded down (if true, affects all time based escape sequences except %t)

#hdfs.roundValue
#Rounded down to the highest multiple of this (in the unit configured using hdfs.roundUnit), less than current time.

#hdfs.roundUnit (default: second)
#The unit of the round down value - second, minute or hour.


############################################

#   elasticsearch config

############################################

producer.channels.c2.type = memory

#Timeout in seconds for adding or removing an event

producer.channels.c2.keep-alive= 30

producer.channels.c2.capacity = 10000

producer.channels.c2.transactionCapacity = 10000

producer.channels.c2.byteCapacityBufferPercentage = 20

producer.channels.c2.byteCapacity = 800000


producer.sinks.es.channel = c2


producer.sinks.es.type = org.apache.flume.sink.elasticsearch.ElasticSearchSink

producer.sinks.es.hostNames = 127.0.0.1:9300

#Name of the ElasticSearch cluster to connect to

producer.sinks.es.clusterName = sunxucool

#Number of events to be written per txn.

producer.sinks.es.batchSize = 1000

#The name of the index which the date will be appended to. Example ‘flume’ -> ‘flume-yyyy-MM-dd’

producer.sinks.es.indexName = flume_es

#The type to index the document to, defaults to ‘log’

producer.sinks.es.indexType = test

producer.sinks.es.serializer = org.apache.flume.sink.elasticsearch.ElasticSearchLogStashEventSerializer

展开阅读全文
打赏
0
1 收藏
分享
加载中
更多评论
打赏
0 评论
1 收藏
0
分享
返回顶部
顶部