The apim_metrics log is API Manager's analytics and diagnostics log, and enabling it is strongly recommended.
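Before touching the log configuration, the analytics publisher itself has to be switched on. A minimal sketch of the deployment.toml setting used for the ELK-based flow, following WSO2's ELK analytics guide (verify the values against your product version):

[apim.analytics]
enable = true
type = "elk"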
Then add the following appender and logger entries to wso2am-4.x.x/repository/conf/log4j2.properties:
appenders = APIM_METRICS_APPENDER, .... (list of other available appenders)
appender.APIM_METRICS_APPENDER.type = RollingFile
appender.APIM_METRICS_APPENDER.name = APIM_METRICS_APPENDER
appender.APIM_METRICS_APPENDER.fileName = ${sys:carbon.home}/repository/logs/apim_metrics.log
appender.APIM_METRICS_APPENDER.filePattern = ${sys:carbon.home}/repository/logs/apim_metrics-%d{MM-dd-yyyy}-%i.log
appender.APIM_METRICS_APPENDER.layout.type = PatternLayout
appender.APIM_METRICS_APPENDER.layout.pattern = %d{HH:mm:ss,SSS} [%X{ip}-%X{host}] [%t] %5p %c{1} %m%n
appender.APIM_METRICS_APPENDER.policies.type = Policies
appender.APIM_METRICS_APPENDER.policies.time.type = TimeBasedTriggeringPolicy
appender.APIM_METRICS_APPENDER.policies.time.interval = 1
appender.APIM_METRICS_APPENDER.policies.time.modulate = true
appender.APIM_METRICS_APPENDER.policies.size.type = SizeBasedTriggeringPolicy
appender.APIM_METRICS_APPENDER.policies.size.size = 1000MB
appender.APIM_METRICS_APPENDER.strategy.type = DefaultRolloverStrategy
appender.APIM_METRICS_APPENDER.strategy.max = 10
loggers = reporter, ...(list of other available loggers)
logger.reporter.name = org.wso2.am.analytics.publisher.reporter.elk
logger.reporter.level = INFO
logger.reporter.additivity = false
logger.reporter.appenderRef.APIM_METRICS_APPENDER.ref = APIM_METRICS_APPENDER
By default the apim_metrics.log file is rolled each day, or whenever it reaches the 1000 MB size limit. Rolled files follow the filePattern above (for example apim_metrics-05-19-2025-1.log); only 10 of them are kept, and older ones are deleted automatically. You can change this behavior by adjusting the appender configuration shown above.
The call flow being measured is: end user → the application they registered → the WSO2-fronted API. Each invocation produces a metric event like this:
{"apiName":"user-register","proxyResponseCode":200,"destination":"https://test.ddd.com/user-
register","apiCreatorTenantDomain":"carbon.super","platform":"Other","apiMethod":"GET","apiVersion":"1.0.0","gatewayType":"SYNAPSE","apiCreator":"admin","responseCacheHit":false,"backendLatency":111,"correlationId":
"0e5482a5-b281-4b91-a728-1b90f443110c","requestMediationLatency":389,"keyType":"PRODUCTION","apiId":"d642741c-b34a-4fde-8e47-5ef70455f638","applicationName":"test1","targetResponseCode":200,"requestTimestamp":"2025-
05-19T02:01:28.765Z","applicationOwner":"admin","userAgent":"PostmanRuntime","userName":"admin@carbon.super","apiResourceTemplate":"/*","regionId":"default","responseLatency":511,"responseMediationLatency":11,"userI
p":"111.1.1.2","apiContext":"/user/1.0.0","applicationId":"a18b9944-5ddf-4708-9922-a45e04474f81","apiType":"HTTP","properties":{"commonName":"N/A","responseContentType":"application/json","subtype":"D
EFAULT","isEgress":false,"apiContext":"/user-register/1.0.0","responseSize":0,"userName":"admin@carbon.super"}}
The module /home/wso2carbon/wso2am-4.5.0/repository/components/plugins/org.wso2.am.analytics.publisher.client_1.2.23.jar is open to custom development: it already integrates the remote (cloud) publisher and the ELK log reporter, and we can extend it with a Kafka publisher alongside them. After extending the code, compile it and overwrite the original jar. For example, the counter metric's incrementCount method, which writes the apim_metrics entries, can also push each event to Kafka:
@Override
public int incrementCount(MetricEventBuilder builder) throws MetricReportingException {
    Map<String, Object> event = builder.build();
    String jsonString = gson.toJson(event);
    // Strip CR/LF characters so the event stays on a single log line
    String jsonStringResult = jsonString.replaceAll("[\r\n]", "");
    log.info("apimMetrics: " + name.replaceAll("[\r\n]", "") + ", properties :" + jsonStringResult);
    // In addition to the ELK log entry, push the same event to Kafka
    KafkaMqProducer.publishEvent("apim-metrics", jsonStringResult);
    return 0;
}
$ mvn clean install -DskipTests -Dcheckstyle.skip
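The KafkaMqProducer helper referenced above is shown next. ConfigFactory is the plugin's own configuration helper that supplies the kafka.host bootstrap address; adapt it to however your build reads configuration.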
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
 * Kafka producer that pushes APIM metric events to a topic.
 */
public class KafkaMqProducer {
    // "kafka.host" is resolved through the plugin's own ConfigFactory helper
    private static final String BOOTSTRAP_SERVER = ConfigFactory.getInstance().getStrPropertyValue("kafka.host");
    private static final Logger logger = LogManager.getLogger(KafkaMqProducer.class);
    private static KafkaProducer<String, String> producer;
    private static final ExecutorService executorService = Executors.newFixedThreadPool(4);

    private static synchronized KafkaProducer<String, String> getProducer() {
        if (producer == null) {
            // Reset the thread context class loader so the Kafka client
            // falls back to its own class loader when loading serializers (OSGi)
            resetThreadContext();
            // Create the producer lazily on first use
            producer = new KafkaProducer<>(getProperties());
        }
        return producer;
    }

    public static void publishEvent(String topic, String value) {
        executorService.execute(() -> {
            try {
                // Create a producer record for the metric event
                ProducerRecord<String, String> eventRecord = new ProducerRecord<>(topic, value);
                // Send asynchronously and log any delivery failure
                getProducer().send(eventRecord, (recordMetadata, e) -> {
                    if (e != null) {
                        logger.error("kafka.send.error", e);
                    }
                });
            } catch (Exception ex) {
                logger.error("kafka.error", ex);
            }
        });
    }

    private static void resetThreadContext() {
        Thread.currentThread().setContextClassLoader(null);
    }

    public static Properties getProperties() {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVER);
        properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        return properties;
    }
}
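Two design points are worth noting. Events are published from a small fixed thread pool, so a slow or unreachable broker never blocks the gateway's request path. And the thread context class loader is cleared before the first KafkaProducer is created because the Kafka client resolves its serializer classes through the context class loader when one is set; inside an OSGi bundle that loader cannot see the Kafka classes, while a null context makes the client fall back to its own class loader.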
To integrate third-party components such as Kafka or RabbitMQ, their original client jars must be added to the /home/wso2carbon/wso2am-4.5.0/lib directory. You can treat this directory as a shared library directory: jars placed here can be loaded by other modules, much like modules in JBoss, except that on this OSGi platform you do not have to declare them explicitly via a jboss-deployment-structure.xml. If you deploy with Docker, add these jars on top of the official base image, as shown in the Dockerfile below.
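For a plain (non-Docker) install, copying the client jar into lib and restarting the server is enough; the jar version here is illustrative:

$ cp kafka-clients-3.6.1.jar /home/wso2carbon/wso2am-4.5.0/lib/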
Dockerfile
# Build on the official WSO2 APIM image
FROM wso2/wso2am:4.5.0
# Third-party jars go into the shared lib directory
COPY lib/*.jar /home/wso2carbon/wso2am-4.5.0/lib/
# Business plugin jar: replaces/overwrites the target JAR. After rebuilding the image, update the sha256 value in values.yaml so the cluster pulls the latest image
COPY plugins/*.jar /home/wso2carbon/wso2am-4.5.0/repository/components/plugins/
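After rebuilding, push the image and roll out the new digest; the registry name and tag below are placeholders:

$ docker build -t registry.example.com/wso2am:4.5.0-custom .
$ docker push registry.example.com/wso2am:4.5.0-custom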