first commit

2024-08-22 14:05:05 +09:00
commit 1ab0d83941
34 changed files with 4083 additions and 0 deletions

admintool/.env Normal file

@@ -0,0 +1,6 @@
MYSQL_HOST=localhost
MYSQL_PORT=3306
MYSQL_ROOT_PASSWORD=root!
MYSQL_DATABASE=user_stat
MYSQL_USER=khlee103
MYSQL_PASSWORD=123
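
For reference, a minimal sketch of how a client could consume these variables (hypothetical PHP, not part of this commit; only the variable names come from the .env above):

```php
<?php
// Hypothetical sketch: build a PDO connection from the admintool/.env values.
$dsn = sprintf(
    'mysql:host=%s;port=%s;dbname=%s;charset=utf8mb4',
    getenv('MYSQL_HOST'), getenv('MYSQL_PORT'), getenv('MYSQL_DATABASE')
);
$pdo = new PDO($dsn, getenv('MYSQL_USER'), getenv('MYSQL_PASSWORD'));
```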


@@ -0,0 +1,10 @@
[client]
default-character-set = utf8mb4
[mysql]
default-character-set = utf8mb4
[mysqld]
character-set-client-handshake = FALSE
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci


@@ -0,0 +1,420 @@
-- --------------------------------------------------------
-- Host: 127.0.0.1
-- Server version: 10.11.6-MariaDB-1:10.11.6+maria~ubu2204 - mariadb.org binary distribution
-- Server OS: debian-linux-gnu
-- HeidiSQL version: 12.6.0.6765
-- --------------------------------------------------------
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET NAMES utf8 */;
/*!50503 SET NAMES utf8mb4 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-- Dumping database structure for caliverse
CREATE DATABASE IF NOT EXISTS `caliverse` /*!40100 DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_bin */;
USE `caliverse`;
-- Dumping structure for table caliverse.admin
CREATE TABLE IF NOT EXISTS `admin` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`group_id` bigint(20) DEFAULT NULL,
`email` varchar(255) NOT NULL,
`name` varchar(255) NOT NULL,
`password` varchar(255) NOT NULL,
`status` varchar(255) NOT NULL,
`deleted` bit(1) NOT NULL DEFAULT b'0',
`pw_update_dt` datetime NOT NULL DEFAULT current_timestamp(),
`create_dt` datetime NOT NULL DEFAULT current_timestamp(),
`update_by` varchar(255) DEFAULT NULL,
`update_dt` datetime NOT NULL DEFAULT current_timestamp(),
PRIMARY KEY (`id`),
KEY `group_id` (`group_id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- Dumping data for table caliverse.admin: ~1 rows (approximately); admin pw: caliverse1!
INSERT INTO `admin` (`id`, `group_id`, `email`, `name`, `password`, `status`, `deleted`, `pw_update_dt`, `create_dt`, `update_by`, `update_dt`) VALUES
(1, 1, 'caliverse_adm@caliverse.io', 'admin', '$2a$10$YU8mg7ITQlFImlj0PwvvbuXQJK7WgEFlQ03T5mhrWrbHbbGiQ3sj2', 'PERMITTED', b'0', '2024-07-30 16:59:23', '2024-01-26 16:05:23', NULL, '2024-01-26 16:05:23');
-- Dumping structure for table caliverse.admin_history
CREATE TABLE IF NOT EXISTS `admin_history` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`admin_id` bigint(20) NOT NULL,
`password` varchar(255) NOT NULL,
PRIMARY KEY (`id`),
KEY `FK__admin` (`admin_id`)
) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='Admin previous-password history table';
-- Dumping data for table caliverse.admin_history: ~0 rows (approximately)
-- Dumping structure for table caliverse.admin_log
CREATE TABLE IF NOT EXISTS `admin_log` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`admin_id` bigint(20) NOT NULL,
`name` varchar(255) NOT NULL,
`mail` varchar(255) NOT NULL,
`type` varchar(255) NOT NULL,
`content` longtext DEFAULT NULL,
`create_dt` datetime NOT NULL DEFAULT current_timestamp(),
PRIMARY KEY (`id`) USING BTREE,
KEY `admin_id` (`admin_id`)
) ENGINE=InnoDB AUTO_INCREMENT=18 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- Dumping data for table caliverse.admin_log: ~0 rows (approximately)
-- Dumping structure for table caliverse.authority
CREATE TABLE IF NOT EXISTS `authority` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`auth_menu` varchar(255) NOT NULL,
`auth_name` varchar(255) NOT NULL,
`create_by` varchar(255) DEFAULT NULL,
`create_dt` datetime NOT NULL DEFAULT current_timestamp(),
`update_by` varchar(255) DEFAULT NULL,
`update_dt` datetime NOT NULL DEFAULT current_timestamp(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=35 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- Dumping data for table caliverse.authority: ~34 rows (approximately)
INSERT INTO `authority` (`id`, `auth_menu`, `auth_name`, `create_by`, `create_dt`, `update_by`, `update_dt`) VALUES
(1, 'ADMINSEARCH', 'READ', NULL, '2023-08-23 17:49:41', NULL, '2023-08-23 17:49:41'),
(2, 'ADMINSEARCH', 'CONFIRM', NULL, '2023-08-23 20:25:00', NULL, '2023-08-23 20:25:00'),
(3, 'ADMINSEARCH', 'UPDATE', NULL, '2023-08-23 20:25:00', NULL, '2023-08-23 20:25:00'),
(4, 'ADMINSEARCH', 'DELETE', NULL, '2023-08-23 20:25:00', NULL, '2023-08-23 20:25:00'),
(5, 'ADMINLOGSEARCH', 'READ', NULL, '2023-08-23 20:32:32', NULL, '2023-08-23 20:32:32'),
(6, 'AUTHORITYSETTING', 'READ', NULL, '2023-08-23 20:32:44', NULL, '2023-08-23 20:32:44'),
(7, 'AUTHORITYSETTING', 'UPDATE', NULL, '2023-08-23 20:32:44', NULL, '2023-08-23 20:32:44'),
(8, 'AUTHORITYSETTING', 'DELETE', NULL, '2023-08-23 20:32:44', NULL, '2023-08-23 20:32:44'),
(9, 'USERINDICATORS', 'READ', NULL, '2023-09-04 10:48:12', NULL, '2023-09-04 10:48:12'),
(10, 'ECOMINDICATORS', 'READ', NULL, '2023-09-04 10:48:12', NULL, '2023-09-04 10:48:12'),
(11, 'USERSEARCH', 'READ', NULL, '2023-09-04 10:50:00', NULL, '2023-09-04 10:50:00'),
(12, 'USERSEARCH', 'UPDATE', NULL, '2023-09-04 10:50:00', NULL, '2023-09-04 10:50:00'),
(13, 'CONTENTSEARCH', 'READ', NULL, '2023-09-04 10:50:00', NULL, '2023-09-04 10:50:00'),
(14, 'GAMELOG', 'READ', NULL, '2023-09-04 10:51:28', NULL, '2023-09-04 10:51:28'),
(15, 'CRYPTO', 'READ', NULL, '2023-09-04 10:51:49', NULL, '2023-09-04 10:51:49'),
(16, 'INGAME', 'READ', NULL, '2023-09-04 10:52:11', NULL, '2023-09-04 10:52:11'),
(17, 'INGAME', 'UPDATE', NULL, '2023-09-04 10:52:11', NULL, '2023-09-04 10:52:11'),
(18, 'INGAME', 'DELETE', NULL, '2023-09-04 10:52:11', NULL, '2023-09-04 10:52:11'),
(19, 'WHITELIST', 'READ', NULL, '2023-09-04 10:52:59', NULL, '2023-09-04 10:52:59'),
(20, 'WHITELIST', 'CONFIRM', NULL, '2023-09-04 10:56:52', NULL, '2023-09-04 10:56:52'),
(21, 'WHITELIST', 'UPDATE', NULL, '2023-09-04 10:52:59', NULL, '2023-09-04 10:52:59'),
(22, 'MAIL', 'READ', NULL, '2023-09-04 10:52:59', NULL, '2023-09-04 10:52:59'),
(23, 'MAIL', 'UPDATE', NULL, '2023-09-04 10:52:59', NULL, '2023-09-04 10:52:59'),
(24, 'BLACKLIST', 'READ', NULL, '2023-09-04 10:52:59', NULL, '2023-09-04 10:52:59'),
(25, 'BLACKLIST', 'UPDATE', NULL, '2023-09-04 10:54:11', NULL, '2023-09-04 10:54:11'),
(26, 'REPORT', 'READ', NULL, '2023-09-04 10:54:24', NULL, '2023-09-04 10:54:24'),
(27, 'REPORT', 'UPDATE', NULL, '2023-09-04 10:54:32', NULL, '2023-09-04 10:54:32'),
(28, 'WHITELIST', 'DELETE', NULL, '2023-10-10 08:22:08', NULL, '2023-10-10 08:22:08'),
(29, 'MAIL', 'DELETE', NULL, '2023-10-10 08:54:38', NULL, '2023-10-10 08:54:38'),
(30, 'BLACKLIST', 'DELETE', NULL, '2023-10-10 08:54:57', NULL, '2023-10-10 08:54:57'),
(31, 'REPORT', 'DELETE', NULL, '2023-10-10 08:55:11', NULL, '2023-10-10 08:55:11'),
(32, 'ITEMLIST', 'READ', NULL, '2024-08-01 08:55:11', NULL, '2024-08-01 08:55:11'),
(33, 'ITEMLIST', 'UPDATE', NULL, '2024-08-01 08:55:11', NULL, '2024-08-01 08:55:11'),
(34, 'ITEMLIST', 'DELETE', NULL, '2024-08-01 08:55:11', NULL, '2024-08-01 08:55:11');
-- Dumping structure for table caliverse.black_list
CREATE TABLE IF NOT EXISTS `black_list` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`guid` varchar(255) NOT NULL,
`nickname` varchar(50) NOT NULL,
`status` varchar(255) NOT NULL COMMENT 'Status (active sanction/expired/pending)',
`type` varchar(255) NOT NULL COMMENT 'Sanction method (connection ban/chat ban)',
`sanctions` varchar(255) NOT NULL COMMENT 'Sanction reason',
`period` varchar(255) NOT NULL COMMENT 'Sanction period',
`deleted` bit(1) NOT NULL DEFAULT b'0',
`start_dt` datetime NOT NULL COMMENT 'Sanction start date',
`end_dt` datetime NOT NULL COMMENT 'Sanction end date',
`create_by` varchar(255) DEFAULT NULL,
`create_dt` datetime NOT NULL DEFAULT current_timestamp(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=21 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='User sanctions table';
-- Dumping data for table caliverse.black_list: ~0 rows (approximately)
-- Dumping structure for table caliverse.caliverse_meta_data
CREATE TABLE IF NOT EXISTS `caliverse_meta_data` (
`file_name` varchar(256) NOT NULL,
`data_id` int(11) NOT NULL DEFAULT 0,
`json_data` longtext NOT NULL CHECK (json_valid(`json_data`)),
PRIMARY KEY (`file_name`,`data_id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- Dumping data for table caliverse.caliverse_meta_data: ~0 rows (approximately)
-- Dumping structure for table caliverse.groups
CREATE TABLE IF NOT EXISTS `groups` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`name` varchar(255) NOT NULL,
`description` varchar(255) DEFAULT NULL,
`deleted` bit(1) NOT NULL DEFAULT b'0',
`create_by` varchar(255) DEFAULT NULL,
`create_dt` datetime NOT NULL DEFAULT current_timestamp(),
`update_by` varchar(255) DEFAULT NULL,
`update_dt` datetime NOT NULL DEFAULT current_timestamp(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- Dumping data for table caliverse.groups: ~2 rows (approximately)
INSERT INTO `groups` (`id`, `name`, `description`, `deleted`, `create_by`, `create_dt`, `update_by`, `update_dt`) VALUES
(1, '전체관리자권한', '운영툴 전체 메뉴 사용 가능', b'0', 'khlee', '2024-01-26 14:29:20', 'khlee', '2024-01-26 14:29:29'),
(2, '기본권한', '기본 조회 기능만 사용 가능', b'0', 'khlee', '2024-01-26 14:29:20', 'khlee', '2024-01-26 14:29:29');
-- Dumping structure for table caliverse.group_auth
CREATE TABLE IF NOT EXISTS `group_auth` (
`group_auth_id` bigint(20) NOT NULL AUTO_INCREMENT,
`group_id` bigint(20) NOT NULL,
`auth_id` bigint(20) NOT NULL,
PRIMARY KEY (`group_auth_id`),
KEY `FKsx1gx9q7k2thvs83qo1k4xj2o` (`auth_id`),
KEY `FK7iv0c1ovorc2qyb7gd0weu5hk` (`group_id`)
) ENGINE=InnoDB AUTO_INCREMENT=39 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- Dumping data for table caliverse.group_auth: ~38 rows (approximately)
INSERT INTO `group_auth` (`group_auth_id`, `group_id`, `auth_id`) VALUES
(1, 1, 1),
(2, 1, 2),
(3, 1, 3),
(4, 1, 4),
(5, 1, 5),
(6, 1, 6),
(7, 1, 7),
(8, 1, 8),
(9, 1, 9),
(10, 1, 10),
(11, 1, 11),
(12, 1, 12),
(13, 1, 13),
(14, 1, 14),
(15, 1, 15),
(16, 1, 16),
(17, 1, 17),
(18, 1, 18),
(19, 1, 19),
(20, 1, 20),
(21, 1, 21),
(22, 1, 22),
(23, 1, 23),
(24, 1, 24),
(25, 1, 25),
(26, 1, 26),
(27, 1, 27),
(28, 1, 28),
(29, 1, 29),
(30, 1, 30),
(31, 1, 31),
(32, 1, 32),
(33, 1, 33),
(34, 1, 34),
(35, 2, 11),
(36, 2, 13),
(37, 2, 14),
(38, 2, 15);
-- Dumping structure for table caliverse.item
CREATE TABLE IF NOT EXISTS `item` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`mail_id` bigint(20) NOT NULL,
`reward_group_id` varchar(255) NOT NULL,
`item_cnt` bigint(20) NOT NULL,
PRIMARY KEY (`id`),
KEY `FK_item_mail` (`mail_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='Mail item info table';
-- Dumping data for table caliverse.item: ~0 rows (approximately)
-- Dumping structure for table caliverse.mail
CREATE TABLE IF NOT EXISTS `mail` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`target` varchar(255) NOT NULL COMMENT 'Recipient (guid/nickname/filename)',
`receive_type` varchar(255) NOT NULL COMMENT 'Recipient count (single/multiple)',
`user_type` varchar(255) NOT NULL COMMENT 'User type (guid/nickname)',
`is_reserve` bit(1) NOT NULL DEFAULT b'0' COMMENT 'Scheduled-send flag',
`send_type` varchar(255) NOT NULL COMMENT 'Send method',
`send_status` varchar(255) NOT NULL DEFAULT 'WAIT' COMMENT 'Send status',
`mail_type` varchar(255) NOT NULL COMMENT 'Mail type',
`deleted` bit(1) NOT NULL DEFAULT b'0',
`send_dt` datetime DEFAULT NULL,
`create_by` varchar(255) DEFAULT NULL,
`create_dt` datetime NOT NULL DEFAULT current_timestamp(),
`update_by` varchar(255) DEFAULT NULL,
`update_dt` datetime NOT NULL DEFAULT current_timestamp(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=30 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='Mail management';
-- Dumping data for table caliverse.mail: ~0 rows (approximately)
-- Dumping structure for table caliverse.message
CREATE TABLE IF NOT EXISTS `message` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`target_id` bigint(20) NOT NULL,
`type` varchar(50) NOT NULL,
`title` varchar(255) DEFAULT NULL,
`content` longtext NOT NULL,
`language` varchar(50) NOT NULL DEFAULT 'ko',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=9 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='Notice & mail message content\r\n';
-- Dumping data for table caliverse.message: ~0 rows (approximately)
-- Dumping structure for table caliverse.notice
CREATE TABLE IF NOT EXISTS `notice` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`message_type` varchar(255) NOT NULL DEFAULT 'CHATTING',
`send_dt` datetime NOT NULL DEFAULT current_timestamp(),
`is_repeat` bit(1) NOT NULL DEFAULT b'0' COMMENT 'Repeat-send flag',
`repeat_Type` varchar(255) DEFAULT NULL COMMENT 'Repeat type',
`end_dt` datetime DEFAULT NULL COMMENT 'End date',
`repeat_dt` time DEFAULT NULL COMMENT 'Repeat send time',
`repeat_cnt` bigint(20) DEFAULT 0 COMMENT 'Repeat count',
`send_cnt` bigint(20) DEFAULT 0 COMMENT 'Completed-send count',
`deleted` bit(1) NOT NULL DEFAULT b'0',
`create_by` varchar(255) DEFAULT NULL,
`create_dt` datetime NOT NULL DEFAULT current_timestamp(),
`update_by` varchar(255) DEFAULT NULL,
`update_dt` datetime NOT NULL DEFAULT current_timestamp(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='In-game message (notice)';
-- Dumping data for table caliverse.notice: ~0 rows (approximately)
-- Dumping structure for table caliverse.token
CREATE TABLE IF NOT EXISTS `token` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`admin_id` bigint(20) NOT NULL,
`expired` bit(1) NOT NULL,
`revoked` bit(1) NOT NULL DEFAULT b'0',
`token` varchar(255) DEFAULT NULL,
`token_type` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `admin_id` (`admin_id`)
) ENGINE=InnoDB AUTO_INCREMENT=248 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- Dumping data for table caliverse.token: ~1 rows (approximately)
INSERT INTO `token` (`id`, `admin_id`, `expired`, `revoked`, `token`, `token_type`) VALUES
(247, 1, b'0', b'0', 'eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJraGxlZTEwM0Bsb3R0ZS5uZXQiLCJpYXQiOjE3MDcyNzM0ODMsImV4cCI6MTcwNzM1OTg4M30.WC5xFxyVaMAOQ7qr8SLP0VQqLBd4zscYCfhXhwpxOxc', 'BEARER');
-- Dumping structure for table caliverse.white_list
CREATE TABLE IF NOT EXISTS `white_list` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`guid` varchar(50) NOT NULL,
`nickname` varchar(50) NOT NULL,
`status` varchar(255) NOT NULL DEFAULT 'REJECT',
`deleted` bit(1) NOT NULL DEFAULT b'0' COMMENT 'Deleted flag',
`create_by` varchar(255) DEFAULT NULL,
`create_dt` datetime NOT NULL DEFAULT current_timestamp(),
`update_by` varchar(255) DEFAULT NULL,
`update_dt` datetime NOT NULL DEFAULT current_timestamp(),
PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=27 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
-- Dumping data for table caliverse.white_list: ~0 rows (approximately)
/*!40103 SET TIME_ZONE=IFNULL(@OLD_TIME_ZONE, 'system') */;
/*!40101 SET SQL_MODE=IFNULL(@OLD_SQL_MODE, '') */;
/*!40014 SET FOREIGN_KEY_CHECKS=IFNULL(@OLD_FOREIGN_KEY_CHECKS, 1) */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40111 SET SQL_NOTES=IFNULL(@OLD_SQL_NOTES, 1) */;
-- Autogenerated: do not edit this file
CREATE TABLE IF NOT EXISTS BATCH_JOB_INSTANCE (
JOB_INSTANCE_ID BIGINT NOT NULL PRIMARY KEY ,
VERSION BIGINT ,
JOB_NAME VARCHAR(100) NOT NULL,
JOB_KEY VARCHAR(32) NOT NULL,
constraint JOB_INST_UN unique (JOB_NAME, JOB_KEY)
) ENGINE=InnoDB;
CREATE TABLE IF NOT EXISTS BATCH_JOB_EXECUTION (
JOB_EXECUTION_ID BIGINT NOT NULL PRIMARY KEY ,
VERSION BIGINT ,
JOB_INSTANCE_ID BIGINT NOT NULL,
CREATE_TIME DATETIME(6) NOT NULL,
START_TIME DATETIME(6) DEFAULT NULL ,
END_TIME DATETIME(6) DEFAULT NULL ,
STATUS VARCHAR(10) ,
EXIT_CODE VARCHAR(2500) ,
EXIT_MESSAGE VARCHAR(2500) ,
LAST_UPDATED DATETIME(6),
constraint JOB_INST_EXEC_FK foreign key (JOB_INSTANCE_ID)
references BATCH_JOB_INSTANCE(JOB_INSTANCE_ID)
) ENGINE=InnoDB;
CREATE TABLE IF NOT EXISTS BATCH_JOB_EXECUTION_PARAMS (
JOB_EXECUTION_ID BIGINT NOT NULL ,
PARAMETER_NAME VARCHAR(100) NOT NULL ,
PARAMETER_TYPE VARCHAR(100) NOT NULL ,
PARAMETER_VALUE VARCHAR(2500) ,
IDENTIFYING CHAR(1) NOT NULL ,
constraint JOB_EXEC_PARAMS_FK foreign key (JOB_EXECUTION_ID)
references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID)
) ENGINE=InnoDB;
CREATE TABLE IF NOT EXISTS BATCH_STEP_EXECUTION (
STEP_EXECUTION_ID BIGINT NOT NULL PRIMARY KEY ,
VERSION BIGINT NOT NULL,
STEP_NAME VARCHAR(100) NOT NULL,
JOB_EXECUTION_ID BIGINT NOT NULL,
CREATE_TIME DATETIME(6) NOT NULL,
START_TIME DATETIME(6) DEFAULT NULL ,
END_TIME DATETIME(6) DEFAULT NULL ,
STATUS VARCHAR(10) ,
COMMIT_COUNT BIGINT ,
READ_COUNT BIGINT ,
FILTER_COUNT BIGINT ,
WRITE_COUNT BIGINT ,
READ_SKIP_COUNT BIGINT ,
WRITE_SKIP_COUNT BIGINT ,
PROCESS_SKIP_COUNT BIGINT ,
ROLLBACK_COUNT BIGINT ,
EXIT_CODE VARCHAR(2500) ,
EXIT_MESSAGE VARCHAR(2500) ,
LAST_UPDATED DATETIME(6),
constraint JOB_EXEC_STEP_FK foreign key (JOB_EXECUTION_ID)
references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID)
) ENGINE=InnoDB;
CREATE TABLE IF NOT EXISTS BATCH_STEP_EXECUTION_CONTEXT (
STEP_EXECUTION_ID BIGINT NOT NULL PRIMARY KEY,
SHORT_CONTEXT VARCHAR(2500) NOT NULL,
SERIALIZED_CONTEXT TEXT ,
constraint STEP_EXEC_CTX_FK foreign key (STEP_EXECUTION_ID)
references BATCH_STEP_EXECUTION(STEP_EXECUTION_ID)
) ENGINE=InnoDB;
CREATE TABLE IF NOT EXISTS BATCH_JOB_EXECUTION_CONTEXT (
JOB_EXECUTION_ID BIGINT NOT NULL PRIMARY KEY,
SHORT_CONTEXT VARCHAR(2500) NOT NULL,
SERIALIZED_CONTEXT TEXT ,
constraint JOB_EXEC_CTX_FK foreign key (JOB_EXECUTION_ID)
references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID)
) ENGINE=InnoDB;
CREATE TABLE IF NOT EXISTS BATCH_STEP_EXECUTION_SEQ (
ID BIGINT NOT NULL,
UNIQUE_KEY CHAR(1) NOT NULL,
constraint UNIQUE_KEY_UN unique (UNIQUE_KEY)
) ENGINE=InnoDB;
-- Dumping data for table caliverse.BATCH_STEP_EXECUTION_SEQ: ~0 rows (approximately)
INSERT INTO BATCH_STEP_EXECUTION_SEQ (ID, UNIQUE_KEY) select * from (select 0 as ID, '0' as UNIQUE_KEY) as tmp where not exists(select * from BATCH_STEP_EXECUTION_SEQ);
CREATE TABLE IF NOT EXISTS BATCH_JOB_EXECUTION_SEQ (
ID BIGINT NOT NULL,
UNIQUE_KEY CHAR(1) NOT NULL,
constraint UNIQUE_KEY_UN unique (UNIQUE_KEY)
) ENGINE=InnoDB;
-- Dumping data for table caliverse.BATCH_JOB_EXECUTION_SEQ: ~0 rows (approximately)
INSERT INTO BATCH_JOB_EXECUTION_SEQ (ID, UNIQUE_KEY) select * from (select 0 as ID, '0' as UNIQUE_KEY) as tmp where not exists(select * from BATCH_JOB_EXECUTION_SEQ);
CREATE TABLE IF NOT EXISTS BATCH_JOB_SEQ (
ID BIGINT NOT NULL,
UNIQUE_KEY CHAR(1) NOT NULL,
constraint UNIQUE_KEY_UN unique (UNIQUE_KEY)
) ENGINE=InnoDB;
-- Dumping data for table caliverse.BATCH_JOB_SEQ: ~0 rows (approximately)
INSERT INTO BATCH_JOB_SEQ (ID, UNIQUE_KEY) select * from (select 0 as ID, '0' as UNIQUE_KEY) as tmp where not exists(select * from BATCH_JOB_SEQ);
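
Worth a sanity check: the dump's comment above records the seeded admin password (caliverse1!) next to its bcrypt hash. A hypothetical PHP verification (not part of the commit; password_verify() accepts the $2a$ prefix):

```php
<?php
// Hypothetical check: confirm the documented password matches the seeded hash.
$hash = '$2a$10$YU8mg7ITQlFImlj0PwvvbuXQJK7WgEFlQ03T5mhrWrbHbbGiQ3sj2';
var_dump(password_verify('caliverse1!', $hash)); // expected: bool(true)
```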


@@ -0,0 +1,22 @@
version: '3.7'
services:
db:
image: mariadb:10
container_name: admin-mariadb
ports:
- 3306:3306
volumes:
- ./db/conf.d:/etc/mysql/conf.d
- ./db/data:/var/lib/mysql
- ./db/initdb.d:/docker-entrypoint-initdb.d
env_file: .env
environment:
TZ: UTC
networks:
- backend
restart: always
networks:
backend:


@@ -0,0 +1,5 @@
{
"Redis": "127.0.0.1:6379",
"Dynamodb": "http://localhost:8000",
"Kafka": "localhost:9092"
}


@@ -0,0 +1,26 @@
version: '2'
services:
consul:
image: consul:latest
ports:
- "8500:8500"
consulsetup:
image: consul:latest
depends_on:
- consul
restart: "no"
volumes:
- ./config/consul_config.json:/etc/consul_config.json
entrypoint:
["sh", "-c", "curl --request PUT --data @/etc/consul_config.json http://consul:8500/v1/kv/config"]
redis:
image: redis:latest
ports:
- "6379:6379"
dynamodb:
image: amazon/dynamodb-local
ports:
- "8000:8000"

docker-compose.yml Normal file

@@ -0,0 +1,69 @@
version: '3.7'
services:
redis-master:
restart: always
image: redis:7.0.4
command: "redis-server --requirepass KT-i5#i%-%LxKfZ5YJj6"
container_name: redis
volumes:
- "./redis_master:/home/redis_master/data"
ports:
- '6379:6379'
working_dir: /home/redis_master
rabbitmq:
restart: always
image: 'rabbitmq:3-management-alpine'
container_name: rabbitmq-stream
ports:
- "5672:5672"
- "15672:15672"
environment:
RABBITMQ_ERLANG_COOKIE: "RabbitMQ-My-Cookies"
RABBITMQ_DEFAULT_USER: "admin"
RABBITMQ_DEFAULT_PASS: "admin"
dynamodb-local:
restart: always
command: "-jar DynamoDBLocal.jar -sharedDb -dbPath ./data"
image: amazon/dynamodb-local
container_name: dynamodb-local
ports:
- "8000:8000"
volumes:
- "./dynamodb:/home/dynamodblocal/data"
working_dir: /home/dynamodblocal
dynamodb-admin:
restart: always
image: aaronshaf/dynamodb-admin
ports:
- "8001:8001"
environment:
DYNAMO_ENDPOINT: "http://dynamodb-local:8000"
AWS_REGION: "us-west-2"
AWS_ACCESS_KEY_ID: local
AWS_SECRET_ACCESS_KEY: local
depends_on:
- dynamodb-local
mongodb:
image: mongo
container_name: mongodb
restart: always
ports:
- 27017:27017
volumes:
- ./mongodb_data:/data/db
environment:
- MONGO_INITDB_ROOT_USERNAME=root
- MONGO_INITDB_ROOT_PASSWORD=root
mongo-express:
image: mongo-express
restart: always
ports:
- 27117:8081
environment:
ME_CONFIG_MONGODB_URL: mongodb://root:root@mongodb:27017
depends_on:
- mongodb


@@ -0,0 +1,33 @@
version: '3.7'
services:
redis-master:
image: redis:7.0.4
command: "redis-server --requirepass KT-i5#i%-%LxKfZ5YJj6"
container_name: redis
volumes:
- "./redis_master:/home/redis_master/data"
ports:
- '6379:6379'
working_dir: /home/redis_master
dynamodb:
command: "-jar DynamoDBLocal.jar -inMemory"
image: amazon/dynamodb-local:2.0.0
container_name: dynamodb-local
ports:
- "8000:8000"
volumes:
- "./dynamodb:/home/dynamodblocal/data"
working_dir: /home/dynamodblocal
rabbitmq:
image: 'rabbitmq:3-management-alpine'
container_name: rabbitmq-stream
ports:
- "5672:5672"
- "15672:15672"
environment:
RABBITMQ_ERLANG_COOKIE: "RabbitMQ-My-Cookies"
RABBITMQ_DEFAULT_USER: "admin"
RABBITMQ_DEFAULT_PASS: "admin"

grafana-docker/LICENSE Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2021 Benjamin Cremer
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

grafana-docker/README.md Normal file

@@ -0,0 +1,75 @@
# Example Docker Compose project for Telegraf, InfluxDB and Grafana
This is an example project showing the TIG (Telegraf, InfluxDB and Grafana) stack.
![Example Screenshot](./example.png?raw=true "Example Screenshot")
## Start the stack with docker compose
```bash
$ docker-compose up
```
## Services and Ports
### Grafana
- URL: http://localhost:3000
- User: admin
- Password: admin
### Telegraf
- Port: 8125 UDP (StatsD input)
### InfluxDB
- Port: 8086 (HTTP API)
- User: admin
- Password: admin
- Database: influx
Run the influx client:
```bash
$ docker-compose exec influxdb influx -execute 'SHOW DATABASES'
```
Run the influx interactive console:
```bash
$ docker-compose exec influxdb influx
Connected to http://localhost:8086 version 1.8.0
InfluxDB shell version: 1.8.0
>
```
[Import data from a file with -import](https://docs.influxdata.com/influxdb/v1.8/tools/shell/#import-data-from-a-file-with-import)
```bash
$ docker-compose exec -w /imports influxdb influx -import -path=data.txt -precision=s
```
## Run the PHP Example
The PHP example generates random example metrics. The metrics are sent via UDP to the telegraf agent using the StatsD protocol.
The telegraf agent aggregates the incoming data and periodically persists it to the InfluxDB database.
Grafana connects to the InfluxDB database and is able to visualize the incoming data.
```bash
$ cd php-example
$ composer install
$ php example.php
Sending Random metrics. Use Ctrl+C to stop.
..........................^C
Runtime: 0.88382697105408 Seconds
Ops: 27
Ops/s: 30.548965899738
Killed by Ctrl+C
```
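
As an aside on the wire format: league/statsd sends plain-text datagrams over UDP. A hypothetical raw-socket equivalent of one counter increment (not part of the commit; requires ext-sockets):

```php
<?php
// Hypothetical sketch: one StatsD counter datagram, as telegraf receives it.
// With metric_separator = "_", telegraf stores this under the measurement
// "performance_request_successful_count" with the tag type=search.
$msg  = 'performance.request.successful.count,type=search:1|c';
$sock = socket_create(AF_INET, SOCK_DGRAM, SOL_UDP);
socket_sendto($sock, $msg, strlen($msg), 0, '127.0.0.1', 8125);
socket_close($sock);
```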
## License
The MIT License (MIT). Please see [License File](LICENSE) for more information.


@@ -0,0 +1,9 @@
# Grafana options
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=admin
GF_INSTALL_PLUGINS=
# InfluxDB options
INFLUXDB_DB=influx
INFLUXDB_ADMIN_USER=admin
INFLUXDB_ADMIN_PASSWORD=admin


@@ -0,0 +1,39 @@
version: '3.6'
services:
telegraf:
image: telegraf:1.18-alpine
volumes:
- ./telegraf/etc/telegraf.conf:/etc/telegraf/telegraf.conf:ro
depends_on:
- influxdb
links:
- influxdb
ports:
- '8125:8125/udp'
influxdb:
image: influxdb:1.8-alpine
env_file: configuration.env
ports:
- '8086:8086'
volumes:
- ./:/imports
- influxdb_data:/var/lib/influxdb
grafana:
image: grafana/grafana:8.0.2
depends_on:
- influxdb
env_file: configuration.env
links:
- influxdb
ports:
- '3000:3000'
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/provisioning/:/etc/grafana/provisioning/
- ./grafana/dashboards/:/var/lib/grafana/dashboards/
volumes:
grafana_data: {}
influxdb_data: {}

grafana-docker/example.png Normal file
Binary file not shown (324 KiB).


@@ -0,0 +1,373 @@
{
"annotations": {
"list": [
{
"$$hashKey": "object:7",
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"links": [],
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 24,
"x": 0,
"y": 0
},
"hiddenSeries": false,
"id": 4,
"interval": "",
"legend": {
"alignAsTable": true,
"avg": true,
"current": true,
"max": true,
"min": true,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "$tag_type",
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"type"
],
"type": "tag"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "performance_request_successful_time",
"orderByTime": "ASC",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"90_percentile"
],
"type": "field"
},
{
"params": [],
"type": "mean"
}
]
],
"tags": []
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Request Time",
"tooltip": {
"shared": true,
"sort": 1,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:237",
"decimals": null,
"format": "ms",
"label": "",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:238",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 11,
"w": 24,
"x": 0,
"y": 9
},
"hiddenSeries": false,
"id": 2,
"legend": {
"alignAsTable": true,
"avg": true,
"current": false,
"hideEmpty": false,
"hideZero": false,
"max": true,
"min": true,
"rightSide": false,
"show": true,
"total": true,
"values": true
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "Request Type: $tag_type",
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"type"
],
"type": "tag"
},
{
"params": [
"none"
],
"type": "fill"
}
],
"hide": false,
"measurement": "performance_request_successful_count",
"orderByTime": "ASC",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"value"
],
"type": "field"
},
{
"params": [],
"type": "sum"
},
{
"params": [
" / $__interval_ms*1000"
],
"type": "math"
}
]
],
"tags": []
},
{
"alias": "All Types",
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
}
],
"hide": false,
"measurement": "performance_request_successful_count",
"orderByTime": "ASC",
"policy": "default",
"refId": "B",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"value"
],
"type": "field"
},
{
"params": [],
"type": "sum"
},
{
"params": [
" / $__interval_ms*1000"
],
"type": "math"
}
]
],
"tags": []
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Requests per Second",
"tooltip": {
"shared": true,
"sort": 2,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:126",
"format": "reqps",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:127",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 22,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-5m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Performance",
"uid": "1Mar-DTiz",
"variables": {
"list": []
},
"version": 1
}


@@ -0,0 +1,11 @@
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
updateIntervalSeconds: 3 #how often Grafana will scan for changed dashboards
options:
path: /var/lib/grafana/dashboards


@@ -0,0 +1,46 @@
# config file version
apiVersion: 1
# list of datasources that should be deleted from the database
deleteDatasources:
- name: Influxdb
orgId: 1
# list of datasources to insert/update depending
# whats available in the database
datasources:
# <string, required> name of the datasource. Required
- name: InfluxDB
# <string, required> datasource type. Required
type: influxdb
# <string, required> access mode. direct or proxy. Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> url
url: http://influxdb:8086
# <string> database password, if used
password: "admin"
# <string> database user, if used
user: "admin"
# <string> database name, if used
database: "influx"
# <bool> enable/disable basic auth
basicAuth: false
# withCredentials:
# <bool> mark as default datasource. Max one per org
isDefault: true
# <map> fields that will be converted to json and stored in json_data
jsonData:
timeInterval: "5s"
# graphiteVersion: "1.1"
# tlsAuth: false
# tlsAuthWithCACert: false
# # <string> json object of data that will be encrypted.
# secureJsonData:
# tlsCACert: "..."
# tlsClientCert: "..."
# tlsClientKey: "..."
version: 1
# <bool> allow users to edit datasources from the UI.
editable: false

grafana-docker/php-example/.gitignore vendored Normal file

@@ -0,0 +1 @@
vendor/


@@ -0,0 +1,6 @@
{
"require": {
"league/statsd": "^1.5",
"ext-pcntl": "*"
}
}

grafana-docker/php-example/composer.lock generated Normal file

@@ -0,0 +1,79 @@
{
"_readme": [
"This file locks the dependencies of your project to a known state",
"Read more about it at https://getcomposer.org/doc/01-basic-usage.md#installing-dependencies",
"This file is @generated automatically"
],
"content-hash": "54a6353042ab1f33202309a7a67786a2",
"packages": [
{
"name": "league/statsd",
"version": "1.5.0",
"source": {
"type": "git",
"url": "https://github.com/thephpleague/statsd.git",
"reference": "c6290ef6c7528b7b739b26ce6aedf81ee6a4a2ac"
},
"dist": {
"type": "zip",
"url": "https://api.github.com/repos/thephpleague/statsd/zipball/c6290ef6c7528b7b739b26ce6aedf81ee6a4a2ac",
"reference": "c6290ef6c7528b7b739b26ce6aedf81ee6a4a2ac",
"shasum": ""
},
"require-dev": {
"phpunit/phpunit": "^5.7 || ^6.5"
},
"type": "library",
"extra": {
"laravel": {
"providers": [
"League\\StatsD\\Laravel5\\Provider\\StatsdServiceProvider"
],
"aliases": {
"Statsd": "League\\StatsD\\Laravel5\\Facade\\StatsdFacade"
}
}
},
"autoload": {
"psr-4": {
"League\\StatsD\\": "src"
}
},
"notification-url": "https://packagist.org/downloads/",
"license": [
"MIT"
],
"authors": [
{
"name": "Marc Qualie",
"email": "marc@marcqualie.com",
"homepage": "http://marcqualie.com",
"role": "Developer"
}
],
"description": "A simple library for working with StatsD in PHP.",
"homepage": "https://github.com/thephpleague/statsd",
"keywords": [
"graphite",
"library",
"statsd"
],
"support": {
"issues": "https://github.com/thephpleague/statsd/issues",
"source": "https://github.com/thephpleague/statsd/tree/1.5.0"
},
"time": "2018-10-09T16:02:46+00:00"
}
],
"packages-dev": [],
"aliases": [],
"minimum-stability": "stable",
"stability-flags": [],
"prefer-stable": false,
"prefer-lowest": false,
"platform": {
"ext-pcntl": "*"
},
"platform-dev": [],
"plugin-api-version": "2.1.0"
}


@@ -0,0 +1,48 @@
<?php
declare(strict_types=1);
require __DIR__ . '/vendor/autoload.php';
$statsd = new League\StatsD\Client();
$statsd->configure([
'host' => '127.0.0.1',
'port' => 8125,
'namespace' => 'performance'
]);
$ops = 0;
$requestsSent = 0;
$startTime = microtime(true);
pcntl_async_signals(true);
pcntl_signal(SIGINT, static function () use (&$ops, $startTime, &$requestsSent) {
$runtime = microtime(true) - $startTime;
$opsPerSecond = $ops / $runtime;
$requestsPerSecond = $requestsSent / $runtime;
echo PHP_EOL;
echo "Runtime:\t${runtime} Seconds\n";
echo "Ops:\t\t${ops} \n";
echo "Ops/s:\t\t${opsPerSecond} \n";
echo "Requests Sent:\t${requestsSent} \n";
echo "Requests/s:\t${requestsPerSecond} \n";
echo "Killed by Ctrl+C\n";
exit(0);
});
echo "Sending Random metrics. Use Ctrl+C to stop.\n";
while (true) {
$time = random_int(100, 400);
$types = ['search', 'book', 'login', 'login'];
$type = $types[random_int(0, 3)];
$delta = random_int(1, 5);
$statsd->increment('request.successful.count,type=' . $type, $delta);
$statsd->timing('request.successful.time,type=' . $type, $time);
$requestsSent += $delta;
++$ops;
usleep(random_int(5, 55) * 1000);
echo '.';
}


@@ -0,0 +1,212 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "5s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 10000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "1s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://influxdb:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "influx" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# Statsd Server
[[inputs.statsd]]
## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp)
protocol = "udp"
## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
max_tcp_connections = 250
## Enable TCP keep alive probes (default=false)
tcp_keep_alive = false
## Specifies the keep-alive period for an active network connection.
## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
## Defaults to the OS configuration.
# tcp_keep_alive_period = "2h"
## Address and port to host UDP listener on
service_address = ":8125"
## The following configuration options control when telegraf clears its cache
## of previous values. If set to false, then telegraf will only clear its
## cache when the daemon is restarted.
## Reset gauges every interval (default=true)
delete_gauges = true
## Reset counters every interval (default=true)
delete_counters = true
## Reset sets every interval (default=true)
delete_sets = true
## Reset timings & histograms every interval (default=true)
delete_timings = true
## Percentiles to calculate for timing & histogram stats
percentiles = [90]
## separator to use between elements of a statsd metric
metric_separator = "_"
## Parses tags in the datadog statsd format
## http://docs.datadoghq.com/guides/dogstatsd/
parse_data_dog_tags = false
## Statsd data translation templates, more info can be read here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
# templates = [
# "cpu.* measurement*"
# ]
## Number of UDP messages allowed to queue up, once filled,
## the statsd server will start dropping packets
allowed_pending_messages = 10000
## Number of timing/histogram values to track per-measurement in the
## calculation of percentiles. Raising this limit increases the accuracy
## of percentiles but also increases the memory usage and cpu time.
percentile_limit = 1000
## Maximum socket buffer size in bytes, once the buffer fills up, metrics
## will start dropping. Defaults to the OS default.
# read_buffer_size = 65535
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## Comment this line if you want the raw CPU time metrics
fielddrop = ["time_*"]
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default, telegraf gather stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points = ["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
ignore_fs = ["tmpfs", "devtmpfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
# Read metrics about network interface usage
[[inputs.net]]
# collect data only about specific interfaces
# interfaces = ["eth0"]
[[inputs.netstat]]
# no configuration
[[inputs.interrupts]]
# no configuration
[[inputs.linux_sysctl_fs]]
# no configuration

kafka-docker Submodule

Submodule kafka-docker added at a49ac63983


@@ -0,0 +1,43 @@
version: '3.7'
services:
mongodb:
restart: always
build:
context: ./mongodb/
dockerfile: Dockerfile
container_name: devmongo
ports:
- "27017:27017"
volumes:
- ./mongodb/data:/data/db
- ./mongodb/init:/docker-entrypoint-initdb.d
command: --bind_ip_all
networks:
- logging
fluentd:
image: fluent/fluentd:edge-debian
container_name: devfluentd
restart: always
user: root
command: >
/bin/sh -c "
apt-get update &&
apt-get install -y build-essential ruby-dev &&
gem install fluent-plugin-mongo --no-document &&
apt-get remove -y build-essential ruby-dev &&
apt-get autoremove -y &&
exec fluentd -c /fluentd/etc/fluent.conf"
volumes:
- ./fluentd/conf:/fluentd/etc
- ../../bin/Debug/logs/archive/business:/fluentd/businesslogs
depends_on:
- mongodb
networks:
- logging
networks:
logging:
driver: bridge


@@ -0,0 +1,23 @@
<match mongo.**>
@type mongo
host mongodb
port 27017
database LogDB
collection Log
<buffer>
flush_interval 5s
</buffer>
</match>
<source>
@type tail
path /fluentd/businesslogs/*.json
pos_file /var/log/td-agent/serverlog.pos
<parse>
@type json
</parse>
tag mongo.server.log
</source>


@@ -0,0 +1,10 @@
# Dockerfile
# Use the official MongoDB image
FROM mongo:latest
# Install the MongoDB client shell
RUN apt-get update && apt-get install -y mongodb-org-shell && rm -rf /var/lib/apt/lists/*
# On container start, run the init script with the MongoDB client tool
CMD mongosh localhost:27017/LogDB /docker-entrypoint-initdb.d/init-mongo.js


@@ -0,0 +1,2 @@
db = db.getSiblingDB('LogDB');
db.createCollection('Log');


@@ -0,0 +1,19 @@
This Docker setup is intended for the Local and Dev environments.
Install it when MongoDB and Fluentd are needed in Local/Dev.
For Fluentd to ship log files to MongoDB, the Fluentd volume entry in docker-compose.yml must be adjusted to match the log path on that server.
Specify the folder where the log files are stored as a relative path from the directory docker-compose.yml is run from.
Example:
volumes:
- ./fluentd/conf:/fluentd/etc
- ../../bin/Debug/logs/archive/business:/fluentd/businesslogs
The `../../bin/Debug/logs/archive/business:` entry above is the path where the log files live.
Do not change anything else.
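
For reference, the tail input's json parser expects one JSON object per line in /fluentd/businesslogs/*.json. A hypothetical writer on the application side (not part of the commit; field names are made up):

```php
<?php
// Hypothetical sketch: append one JSON object per line, the shape fluentd's
// <parse> @type json (tail input) can ingest from /fluentd/businesslogs.
$entry = ['ts' => date('c'), 'level' => 'info', 'event' => 'login'];
file_put_contents('business.json', json_encode($entry) . PHP_EOL, FILE_APPEND);
```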


@@ -0,0 +1,73 @@
version: '3'
services:
opensearch-node1:
image: opensearchproject/opensearch:latest
container_name: opensearch-node1
environment:
- cluster.name=opensearch-cluster
- node.name=opensearch-node1
- discovery.seed_hosts=opensearch-node1,opensearch-node2
- cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2
- bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
- "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536 # maximum number of open files for the OpenSearch user, set to at least 65536 on modern systems
hard: 65536
volumes:
- opensearch-data1:/usr/share/opensearch/data
ports:
- 9200:9200
- 9600:9600 # required for Performance Analyzer
networks:
- opensearch-net
opensearch-node2:
image: opensearchproject/opensearch:latest
container_name: opensearch-node2
environment:
- cluster.name=opensearch-cluster
- node.name=opensearch-node2
- discovery.seed_hosts=opensearch-node1,opensearch-node2
- cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2
- bootstrap.memory_lock=true
- "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
volumes:
- opensearch-data2:/usr/share/opensearch/data
networks:
- opensearch-net
opensearch-dashboards:
image: opensearchproject/opensearch-dashboards:latest
container_name: opensearch-dashboards
ports:
- 5601:5601
expose:
- "5601"
environment:
OPENSEARCH_HOSTS: '["https://opensearch-node1:9200","https://opensearch-node2:9200"]'
networks:
- opensearch-net
filebeat:
image: docker.elastic.co/beats/filebeat:7.16.3
user: root
volumes:
- ./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- /var/lib/docker/containers:/var/lib/docker/containers:ro
networks:
- opensearch-net
volumes:
opensearch-data1:
opensearch-data2:
networks:
opensearch-net:


@@ -0,0 +1,43 @@
version: '3'
services:
opensearch-node1:
image: opensearchproject/opensearch:latest
container_name: opensearch-node1
environment:
- cluster.name=opensearch-cluster
- node.name=opensearch-node1
- discovery.seed_hosts=opensearch-node1
- discovery.type=single-node
- bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
- "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536 # maximum number of open files for the OpenSearch user, set to at least 65536 on modern systems
hard: 65536
volumes:
- opensearch-data1:/usr/share/opensearch/data
ports:
- 9200:9200
- 9600:9600 # required for Performance Analyzer
networks:
- opensearch-net
opensearch-dashboards:
image: opensearchproject/opensearch-dashboards:latest
container_name: opensearch-dashboards
ports:
- 5601:5601
expose:
- "5601"
environment:
OPENSEARCH_HOSTS: '["https://opensearch-node1:9200"]'
networks:
- opensearch-net
volumes:
opensearch-data1:
networks:
opensearch-net:


@@ -0,0 +1,12 @@
version: '3'
services:
rabbitmq:
image: 'rabbitmq:3-management-alpine'
container_name: rabbitmq-stream
ports:
- "5672:5672"
- "15672:15672"
environment:
RABBITMQ_ERLANG_COOKIE: "RabbitMQ-My-Cookies"
RABBITMQ_DEFAULT_USER: "admin"
RABBITMQ_DEFAULT_PASS: "admin"

readme.txt Normal file

@@ -0,0 +1,49 @@
docker-compose up -d
================================
Everything below is legacy
=================================
# consul ui
http://127.0.0.1:8500/
# dynamodb
http://localhost:8000
# redis
127.0.0.1:6379
# kafka ui
http://127.0.0.1:9000/
# kafka BootstrapServer
localhost:9092
# grafana
http://127.0.0.1:3000/
##############################
Yikes! KeeperErrorCode = Unimplemented for /kafka-manager/mutex Try again.
When the kafka-manager error above occurs:
Attach to the zookeeper container, start the zookeeper client with ./bin/zkCli.sh, then create the three nodes below under the /kafka-manager path.
# docker exec -it my-zookeeper /bin/bash
root@ec5ea8a1e52b:/opt/zookeeper-3.4.13# ./bin/zkCli.sh
...
[zk: localhost:2181(CONNECTED) 0] ls /kafka-manager
[configs, clusters, deleteClusters]
[zk: localhost:2181(CONNECTED) 1] create /kafka-manager/mutex ""
Created /kafka-manager/mutex
[zk: localhost:2181(CONNECTED) 2] create /kafka-manager/mutex/locks ""
Created /kafka-manager/mutex/locks
[zk: localhost:2181(CONNECTED) 3] create /kafka-manager/mutex/leases ""
Created /kafka-manager/mutex/leases
##############################

redis-master-slave.yml Normal file

@@ -0,0 +1,22 @@
version: '3.7'
services:
redis-master:
image: redis:latest
hostname: redis-master
volumes:
- redis_master:/data
ports:
- '6379:6379'
redis-slave:
image: redis:latest
hostname: redis-slave
volumes:
- redis_slave:/data
ports:
- '6479:6379'
command: redis-server --slaveof redis-master 6379
volumes:
redis_master:
redis_slave:
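
A quick replication check against this topology (hypothetical, not part of the commit; requires ext-redis and assumes the port mapping above, where host port 6479 reaches the replica):

```php
<?php
// Hypothetical check: the replica should report role:slave and point at
// redis-master once --slaveof takes effect.
$replica = new Redis();
$replica->connect('127.0.0.1', 6479);
$info = $replica->info('replication');
echo $info['role'], PHP_EOL; // expected: slave
```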

redis_master/redis.conf Normal file
File diff suppressed because it is too large.