TrafficServer source: the possible operations / msg types sent from a remote client to TM (traffic_manager)

// trafficserver/mgmt/api/NetworkMessage.h
typedef enum
{
  FILE_READ,
  FILE_WRITE,
  RECORD_SET,
  RECORD_GET,
  PROXY_STATE_GET,
  PROXY_STATE_SET,
  RECONFIGURE,
  RESTART,
  BOUNCE,
  EVENT_RESOLVE,
  EVENT_GET_MLT,
  EVENT_ACTIVE,
  EVENT_REG_CALLBACK,
  EVENT_UNREG_CALLBACK,
  EVENT_NOTIFY,                 /* only msg sent from TM to client */
  SNAPSHOT_TAKE,
  SNAPSHOT_RESTORE,
  SNAPSHOT_REMOVE,
  SNAPSHOT_GET_MLT,
  DIAGS,
  STATS_RESET_NODE,
  STATS_RESET_CLUSTER,
  STORAGE_DEVICE_CMD_OFFLINE,
  RECORD_MATCH_GET,
  API_PING,
  SERVER_BACKTRACE,
  UNDEFINED_OP /* This must be last */
} OpType;
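
A minimal sketch of how a receiver might branch on these op types once a message has been decoded; the handler names are hypothetical placeholders for illustration, not functions from the TrafficServer tree:

/* hypothetical dispatcher -- do_reconfigure/do_restart/send_ack/send_error
 * are illustrative placeholders, not the actual management API */
static void
dispatch_mgmt_op(OpType op)
{
  switch (op) {
  case RECONFIGURE:           /* re-read configuration */
    do_reconfigure();
    break;
  case RESTART:
  case BOUNCE:                /* restart or bounce the proxy */
    do_restart(op == BOUNCE);
    break;
  case API_PING:              /* liveness probe from the remote client */
    send_ack();
    break;
  default:                    /* UNDEFINED_OP and anything unhandled */
    send_error();
    break;
  }
}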


Using elasticsearch + logstash to store and query real-time logs [cdn realtime analytics]

I won't cover the installation steps here.

The main point is the pipeline:

nginx streams access logs to syslog-ng in real time via a named pipe
syslog-ng pushes the log lines to logstash over UDP across the network
logstash feeds them into elasticsearch and indexes them

Sender side:
nginx.conf

# ...
log_format real_time '- $time_iso8601 $host $request_time $status $bytes_sent';
server {
        listen 80;
        server_name my_test_rt;
        access_log /dev/realtime.pipe real_time;
        location / {
                proxy_pass http://backend.com;
        }
    }
# ...

syslog-ng.conf

source s_pipe {
	pipe("/dev/realtime.pipe"); };

destination d_udp { udp("127.0.0.1" port(9999) template ("$MSG\n") ); };

log {source(s_pipe); destination(d_udp); };
# create the named pipe:
mkfifo /dev/realtime.pipe

# start syslog-ng first,
# otherwise nginx hangs on startup (opening a pipe for writing blocks until a reader exists)
service syslog-ng start

service nginx start
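
Before logstash enters the picture, the nginx -> pipe -> syslog-ng -> UDP leg can be verified on its own. A quick sanity check, assuming nc (netcat) is installed and logstash is not yet bound to port 9999:

# listen on the UDP port syslog-ng forwards to
# (traditional netcat wants -p: nc -u -l -p 9999)
nc -u -l 9999 &

# generate one request so nginx writes a line into the pipe
curl -s -o /dev/null -H 'Host: my_test_rt' http://127.0.0.1/

# the formatted log line should appear on the nc listener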

Receiver side:
/etc/logstash/conf.d/rt.conf

input {
	udp {
		port =>9999
	}
}

filter {
  grok {
    pattern => ["%{TIMESTAMP_ISO8601:timestamp} %{IPORHOST:host} %{IPORHOST:domain} %{NUMBER:request_time} %{NUMBER:status} %{NUMBER:bytes_sent}" ]
  }
  mutate {
    remove_field => [ "message", "@version" ]
  }
}
output {
	elasticsearch {
		host => "127.0.0.1"
		flush_size => 1
		index => "rt-%{+YYYY.MM.dd.HH.mm}"
	}
}

Start both logstash and elasticsearch, and the whole pipeline is up and running.
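
A quick way to confirm data really lands in elasticsearch (assuming it listens on the default 127.0.0.1:9200):

# count documents across the per-minute rt-* indices
curl 'http://127.0.0.1:9200/rt-*/_count?pretty'

# fetch one document to eyeball the parsed fields
curl 'http://127.0.0.1:9200/rt-*/_search?pretty&size=1'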


squid logformat (applies to version 2.7)

Excerpted from the official manual.

Usage:

	logformat <name> <format specification>

	Defines an access log format.

	The <format specification> is a string with embedded % format codes

	% format codes all follow the same basic structure where all but
	the formatcode is optional. Output strings are automatically escaped
	as required according to their context and the output format
	modifiers are usually not needed, but can be specified if an explicit
	output format is desired.

		% ["|[|'|#] [-] [[0]width] [{argument}] formatcode

		"	output in quoted string format
		[	output in squid text log format as used by log_mime_hdrs
		#	output in URL quoted format
		'	output as-is

		-	left aligned
		width	field width. If starting with 0 the
			output is zero padded
		{arg}	argument such as header name etc

	Format codes:

		>a	Client source IP address
		>A	Client FQDN
		>p	Client source port
		<A	Server IP address or peer name
		la	Local IP address (http_port)
		lp	Local port number (http_port)
		oa	Our outgoing IP address (tcp_outgoing_address)
		ts	Seconds since epoch
		tu	subsecond time (milliseconds)
		tl	Local time. Optional strftime format argument
			default %d/%b/%Y:%H:%M:%S %z
		tg	GMT time. Optional strftime format argument
			default %d/%b/%Y:%H:%M:%S %z
		tr	Response time (milliseconds)
		>h	Request header. Optional header name argument
			on the format header[:[separator]element]
		<h	Reply header. Optional header name argument
			as for >h
		un	User name
		ul	User name from authentication
		ui	User name from ident
		us	User name from SSL
		ue	User name from external acl helper
		Hs	HTTP status code
		Ss	Squid request status (TCP_MISS etc)
		Sh	Squid hierarchy status (DEFAULT_PARENT etc)
		mt	MIME content type
		rm	Request method (GET/POST etc)
		ru	Request URL
		rp	Request URL-Path excluding hostname
		rv	Request protocol version
		ea	Log string returned by external acl
		<st	Reply size including HTTP headers
		>st	Request size including HTTP headers
		st	Request+Reply size including HTTP headers
		sn	Unique sequence number per log line entry
		%	a literal % character
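
The same squid.conf documentation also defines ready-made formats built from these codes; they illustrate the width/zero-pad modifiers (%6tr, %03Hs) and the header argument (%{Referer}>h):

	logformat squid %ts.%03tu %6tr %>a %Ss/%03Hs %<st %rm %ru %un %Sh/%<A %mt
	logformat combined %>a %ui %un [%tl] "%rm %ru HTTP/%rv" %Hs %<st "%{Referer}>h" "%{User-Agent}>h" %Ss:%Sh

A named format is then selected per log file, e.g.:

	access_log /var/log/squid/access.log combined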

haproxy configuration

My own test environment:
squid listens on :8081
haproxy listens on :80

global
        maxconn 1024 # max connections per process
        chroot /haproxy # chroot directory (install path)
        uid 99 # run as user 99 (nobody)
        gid 99 # run as group 99 (nobody)
        daemon # run as a daemon
        quiet
        nbproc 10 # number of processes to start
        pidfile /haproxy/run.pid # path of the pid file

defaults
        log global
        mode http
        option httplog
        option dontlognull
        log 127.0.0.1 local3 info # log level [err warning info debug]
        retries 3 # number of retries after a failed connection to a server
        option redispatch # allow the session to be redispatched after a connection failure or loss
        maxconn 1024 # max connections to the backend servers
        contimeout 100ms # connect timeout
        clitimeout 10000ms # client timeout
        srvtimeout 10000ms # server timeout
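
Note that the "log 127.0.0.1 local3 info" line only yields a log file if a local syslog daemon accepts UDP syslog on 127.0.0.1 and routes facility local3 somewhere. A minimal sketch for rsyslog (assuming rsyslog is the syslog daemon in use; the target path is arbitrary):

# /etc/rsyslog.d/haproxy.conf -- accept UDP syslog locally, route local3 to a file
$ModLoad imudp
$UDPServerAddress 127.0.0.1
$UDPServerRun 514
local3.*    /var/log/haproxy.log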

listen cluster 0.0.0.0:80 # listen on host:port
        mode http # HTTP layer-7 mode
        balance roundrobin # load-balancing algorithm
        option httpclose
        option forwardfor
        server internalweb1 127.0.0.1:8081 weight 5
        #weight    server weight
        #check     health check [not configured here; see the sketch below]
        #inter n   interval between two checks, in ms (check granularity)
        #rise n    server is considered up after n consecutive successful checks
        #fall n    server is considered down after n consecutive failed checks
        #maxconn n max concurrent connections for this server
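
For reference, this is roughly what the server line looks like with the check options commented above actually enabled; the interval and limits are illustrative values, not part of the original setup:

        server internalweb1 127.0.0.1:8081 weight 5 check inter 2000 rise 2 fall 3 maxconn 512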

Reference link
