Skip to content

Instantly share code, notes, and snippets.

@maketheworldwise
Last active June 11, 2023 15:00
Show Gist options
  • Save maketheworldwise/daee1ce195d4cc724f5cc15631f251e7 to your computer and use it in GitHub Desktop.
Save maketheworldwise/daee1ce195d4cc724f5cc15631f251e7 to your computer and use it in GitHub Desktop.
[Spring] AWS Cloudwatch Logback Appender 설정

기본 설정

# logback.xml
<configuration packagingData="true">

    <!-- Timestamp usable in the Log Stream Name -->
    <timestamp key="timestamp" datePattern="yyyy-MM-dd-HH-mm-ssSSS"/>

    <!-- The actual AwsLogsAppender (asynchronous mode because maxFlushTimeMillis > 0) -->
    <appender name="ASYNC_AWS_LOGS" class="ca.pjer.logback.AwsLogsAppender">

        <!-- Send only ERROR and above -->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>ERROR</level>
        </filter>

        <!-- Nice layout pattern -->
        <layout>
            <pattern>[%date] %highlight([%level]) [%logger{10} %file:%line] %msg%n</pattern>
        </layout>

        <!-- Hardcoded Log Group Name -->
        <logGroupName>YAPP-log</logGroupName>

        <!-- Log Stream Name UUID Prefix -->
        <logStreamUuidPrefix>YAPP/</logStreamUuidPrefix>

        <!-- AWS Region -->
        <logRegion>ap-northeast-2</logRegion>

        <!-- Maximum number of events in each batch (50 is the default) -->
        <!-- will flush when the event queue has 50 elements, even if still in quiet time (see maxFlushTimeMillis) -->
        <maxBatchLogEvents>50</maxBatchLogEvents>

        <!-- Maximum quiet time in millisecond (0 is the default) -->
        <!-- will flush when met, even if the batch size is not met (see maxBatchLogEvents) -->
        <maxFlushTimeMillis>30000</maxFlushTimeMillis>

        <!-- Maximum block time in millisecond (5000 is the default) -->
        <!-- when > 0: this is the maximum time the logging thread will wait for the logger, -->
        <!-- when == 0: the logging thread will never wait for the logger, discarding events while the queue is full -->
        <maxBlockTimeMillis>5000</maxBlockTimeMillis>

        <!-- Retention value for log groups, 0 for infinite; see -->
        <!-- https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html for other -->
        <!-- possible values -->
        <retentionTimeDays>0</retentionTimeDays>
    </appender>

    <!-- A console output -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>[%date] %highlight([%level]) [%logger{10} %file:%line] %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Root with a threshold to INFO and above. -->
    <!-- Each appender is referenced exactly once: the original config attached the AWS -->
    <!-- appender both unconditionally and inside a Janino-dependent <if>, which would -->
    <!-- duplicate every event when the "color" property was defined. -->
    <root level="INFO">
        <appender-ref ref="ASYNC_AWS_LOGS"/>
        <appender-ref ref="CONSOLE"/>
    </root>
</configuration>

분리 (Default, Cloudwatch, Spring)

# logback-default-appender.xml
<included>
    <!-- Console appender fragment, pulled into the main configuration via <include>. -->
    <!-- Prints date, highlighted level, abbreviated logger name, source location and message. -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>[%date] %highlight([%level]) [%logger{10} %file:%line] %msg%n</pattern>
        </encoder>
    </appender>
</included>
# logback-cloudwatch-appender.xml
<included>
    <!-- AWS CloudWatch appender fragment (ca.pjer.logback.AwsLogsAppender). -->
    <!-- The appender name may be overridden through the CLOUDWATCH_APPENDER_NAME property so -->
    <!-- this fragment can be included more than once without duplicate-name collisions. -->
    <!-- When the property is not set, the original name AWS_CLOUDWATCH is used, so existing -->
    <!-- includes keep working unchanged. -->
    <appender name="${CLOUDWATCH_APPENDER_NAME:-AWS_CLOUDWATCH}" class="ca.pjer.logback.AwsLogsAppender">
        <!-- Emit INFO level and above -->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>

        <!-- Output pattern -->
        <layout>
            <!-- <pattern>[%date] %highlight([%level]) [%logger{10} %file:%line] %msg%n</pattern> -->
            <pattern>[%date] %msg%n</pattern>
        </layout>

        <!-- Log group name (supplied by the including configuration) -->
        <logGroupName>${LOG_GROUP}</logGroupName>
        <!-- Log stream name prefix within the group -->
        <logStreamUuidPrefix>${LOG_GROUP_STREAM_PREFIX}</logStreamUuidPrefix>

        <!-- AWS region -->
        <logRegion>ap-northeast-2</logRegion>

        <!-- Flush once the queue holds this many events (default 50) -->
        <maxBatchLogEvents>50</maxBatchLogEvents>
        <!-- Flush after 30000 ms even if the batch size has not been reached -->
        <maxFlushTimeMillis>30000</maxFlushTimeMillis>
        <!-- Maximum time the logging thread will block waiting for the logger -->
        <maxBlockTimeMillis>5000</maxBlockTimeMillis>
        <!-- Log retention in days (0 = keep forever) -->
        <retentionTimeDays>0</retentionTimeDays>
    </appender>

</included>
# logback-spring.xml
<configuration packagingData="true">

    <!-- Timestamp usable in log stream names -->
    <timestamp key="timestamp" datePattern="yyyy-MM-dd-HH-mm-ssSSS"/>

    <!-- Console - prints INFO and above (local environment) -->
    <include resource="logback-default-appender.xml"/>
    <root level="INFO">
        <appender-ref ref="STDOUT"/>
    </root>

    <!-- AWS CloudWatch - INFO and above per domain, prod profile only. -->
    <!-- NOTE(review): each <include> below defines an appender with the same name -->
    <!-- (AWS_CLOUDWATCH), so three identically-named appenders are created; whether each -->
    <!-- logger binds to the appender built from the properties directly above it relies on -->
    <!-- Joran's sequential processing - TODO confirm, or give each appender a unique name. -->
    <springProfile name="prod">

        <!-- Users: dedicated log group, additivity off so events don't also reach root -->
        <property name="LOG_GROUP" value="/aws/spring/user" />
        <property name="LOG_GROUP_STREAM_PREFIX" value="user/" />
        <include resource="logback-cloudwatch-appender.xml"/>
        <logger name="com.example.api.logger.user"  level="info" additivity="false">
            <appender-ref ref="AWS_CLOUDWATCH"/>
        </logger>

        <!-- User products -->
        <property name="LOG_GROUP" value="/aws/spring/product" />
        <property name="LOG_GROUP_STREAM_PREFIX" value="product/" />
        <include resource="logback-cloudwatch-appender.xml"/>
        <logger name="com.example.api.logger.product"  level="info" additivity="false">
            <appender-ref ref="AWS_CLOUDWATCH"/>
        </logger>

        <!-- Posts -->
        <property name="LOG_GROUP" value="/aws/spring/post" />
        <property name="LOG_GROUP_STREAM_PREFIX" value="post/" />
        <include resource="logback-cloudwatch-appender.xml"/>
        <logger name="com.example.api.logger.post"  level="info" additivity="false">
            <appender-ref ref="AWS_CLOUDWATCH"/>
        </logger>

    </springProfile>

</configuration>
@maketheworldwise
Copy link
Author

@maketheworldwise
Copy link
Author

AWS 서비스 자체적으로 로그를 수집하는 기능이 존재하고, 로그 수집이 활성화 되었을 경우에는 굳이 사용할 필요는 없으나, 로그를 각 도메인별로 구성하고 싶을 때 사용하면 좋을 듯함.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment