빅쿼리 모니터링
성능 모니터링 및 알림
BigQuery의 성능, 비용, 사용량을 모니터링하고 최적화하는 방법을 다루는 가이드입니다.
목차
1. 모니터링 개요
1.1 모니터링 데이터 소스
BigQuery는 다양한 메타데이터 뷰를 제공합니다:
-- Enumerate the job-related monitoring views exposed by BigQuery's
-- regional INFORMATION_SCHEMA.
SELECT
  table_name,
  table_type
FROM `project.region-us.INFORMATION_SCHEMA.TABLES`
WHERE table_schema = 'INFORMATION_SCHEMA'
  AND table_name LIKE '%JOB%'
ORDER BY table_name;
-- Expected result:
-- JOBS_BY_PROJECT
-- JOBS_BY_USER
-- JOBS_BY_ORGANIZATION
-- JOB_TIMELINE_BY_PROJECT
-- JOB_TIMELINE_BY_USER
1.2 기본 모니터링 쿼리
-- Daily BigQuery usage summary per user and project (rolling 7 days).
-- Uses COUNTIF (BigQuery idiom) instead of COUNT(CASE ...) for conditional counts.
CREATE OR REPLACE VIEW `project.monitoring.daily_usage_summary` AS
SELECT
  DATE(creation_time) AS usage_date,
  user_email,
  project_id,
  -- Query volume
  COUNT(*) AS total_queries,
  COUNTIF(state = 'DONE' AND error_result IS NULL) AS successful_queries,
  COUNTIF(error_result IS NOT NULL) AS failed_queries,
  -- Performance metrics
  ROUND(AVG(TIMESTAMP_DIFF(end_time, start_time, SECOND)), 2) AS avg_duration_seconds,
  ROUND(MAX(TIMESTAMP_DIFF(end_time, start_time, SECOND)), 2) AS max_duration_seconds,
  -- Resource usage
  ROUND(SUM(total_bytes_processed) / 1024 / 1024 / 1024, 2) AS total_gb_processed,
  ROUND(SUM(total_slot_ms) / 1000 / 60 / 60, 2) AS total_slot_hours,
  -- Cost estimate based on on-demand scan pricing ($5/TB here; confirm your
  -- region's current rate before relying on this number)
  ROUND(SUM(total_bytes_processed) / 1024 / 1024 / 1024 / 1024 * 5, 2) AS estimated_scan_cost_usd
FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
WHERE DATE(creation_time) >= DATE_SUB(CURRENT_DATE(), INTERVAL 7 DAY)
  AND job_type = 'QUERY'
GROUP BY usage_date, user_email, project_id
ORDER BY usage_date DESC, total_gb_processed DESC;
2. 쿼리 성능 모니터링
2.1 느린 쿼리 감지
-- Identify poorly performing queries from the last day (runtime > 60s).
WITH slow_queries AS (
  SELECT
    job_id,
    user_email,
    query,
    creation_time,
    start_time,
    end_time,
    TIMESTAMP_DIFF(end_time, start_time, SECOND) AS duration_seconds,
    total_bytes_processed / 1024 / 1024 / 1024 AS gb_processed,
    total_slot_ms / 1000 / 60 AS slot_minutes,
    -- Efficiency: bytes processed per slot-millisecond (NULL-safe when slot_ms = 0)
    total_bytes_processed / NULLIF(total_slot_ms, 0) * 1000 AS bytes_per_slot_ms,
    -- Complexity indicators: count keyword occurrences directly with
    -- REGEXP_EXTRACT_ALL instead of the LENGTH/REPLACE arithmetic hack
    -- (clearer and yields whole numbers)
    LENGTH(query) AS query_length,
    ARRAY_LENGTH(REGEXP_EXTRACT_ALL(UPPER(query), 'JOIN')) AS join_count,
    ARRAY_LENGTH(REGEXP_EXTRACT_ALL(UPPER(query), 'SELECT')) AS select_count
  FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
  WHERE DATE(creation_time) >= DATE_SUB(CURRENT_DATE(), INTERVAL 1 DAY)
    AND job_type = 'QUERY'
    AND state = 'DONE'
    AND error_result IS NULL
    AND TIMESTAMP_DIFF(end_time, start_time, SECOND) > 60 -- ran longer than 1 minute
)
SELECT
  job_id,
  user_email,
  SUBSTR(query, 1, 100) AS query_preview,
  duration_seconds,
  gb_processed,
  slot_minutes,
  -- Severity grade by wall-clock duration
  CASE
    WHEN duration_seconds > 3600 THEN 'Critical (>1hr)'
    WHEN duration_seconds > 1800 THEN 'High (>30min)'
    WHEN duration_seconds > 600 THEN 'Medium (>10min)'
    ELSE 'Low (1-10min)'
  END AS performance_issue_level,
  -- First matching optimization hint (branch order matters)
  CASE
    WHEN bytes_per_slot_ms < 1000 THEN 'Consider query optimization - low bytes per slot'
    WHEN gb_processed > 100 THEN 'Large scan - consider partitioning or clustering'
    WHEN join_count > 5 THEN 'Complex joins - review join order and conditions'
    WHEN select_count > 3 THEN 'Multiple subqueries - consider CTEs or temp tables'
    ELSE 'Review for other optimization opportunities'
  END AS optimization_suggestion
FROM slow_queries
ORDER BY duration_seconds DESC
LIMIT 20;
2.2 쿼리 패턴 분석
-- Performance analysis per query pattern (last 7 days).
WITH query_patterns AS (
  SELECT
    job_id,
    user_email,
    query,
    TIMESTAMP_DIFF(end_time, start_time, SECOND) AS duration_seconds,
    total_bytes_processed / 1024 / 1024 / 1024 AS gb_processed,
    -- Classify by statement pattern; first match wins, plain SELECT is the fallback
    CASE
      WHEN UPPER(query) LIKE '%CREATE%TABLE%AS%SELECT%' THEN 'CTAS'
      WHEN UPPER(query) LIKE '%INSERT%INTO%' THEN 'INSERT'
      WHEN UPPER(query) LIKE '%UPDATE%' THEN 'UPDATE'
      WHEN UPPER(query) LIKE '%DELETE%' THEN 'DELETE'
      WHEN UPPER(query) LIKE '%CREATE%VIEW%' THEN 'CREATE_VIEW'
      ELSE 'SELECT'
    END AS query_pattern,
    -- Complexity: keyword occurrence counts (REGEXP_EXTRACT_ALL instead of the
    -- LENGTH/REPLACE division hack)
    ARRAY_LENGTH(REGEXP_EXTRACT_ALL(UPPER(query), 'JOIN')) AS join_count,
    ARRAY_LENGTH(REGEXP_EXTRACT_ALL(UPPER(query), 'WHERE')) AS where_count,
    ARRAY_LENGTH(REGEXP_EXTRACT_ALL(UPPER(query), 'GROUP BY')) AS groupby_count,
    ARRAY_LENGTH(REGEXP_EXTRACT_ALL(UPPER(query), 'ORDER BY')) AS orderby_count
  FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
  WHERE DATE(creation_time) >= DATE_SUB(CURRENT_DATE(), INTERVAL 7 DAY)
    AND job_type = 'QUERY'
    AND state = 'DONE'
    AND error_result IS NULL
)
SELECT
  query_pattern,
  COUNT(*) AS query_count,
  ROUND(AVG(duration_seconds), 2) AS avg_duration,
  ROUND(AVG(gb_processed), 2) AS avg_gb_processed,
  ROUND(AVG(join_count), 1) AS avg_joins,
  -- BUG FIX: PERCENTILE_CONT is analytic-only in BigQuery and cannot reference a
  -- non-grouped column in a GROUP BY query; APPROX_QUANTILES is the aggregate
  -- equivalent (approximate percentiles).
  ROUND(APPROX_QUANTILES(duration_seconds, 100)[OFFSET(50)], 2) AS median_duration,
  ROUND(APPROX_QUANTILES(duration_seconds, 100)[OFFSET(95)], 2) AS p95_duration,
  -- Optimization priority: total time spent on this pattern
  ROUND(AVG(duration_seconds) * COUNT(*), 0) AS optimization_priority_score
FROM query_patterns
GROUP BY query_pattern
ORDER BY optimization_priority_score DESC;
3. 비용 모니터링
3.1 일일 비용 추적
-- Daily BigQuery cost analysis view over the last 30 days of jobs.
CREATE OR REPLACE VIEW `project.monitoring.daily_cost_analysis` AS
WITH cost_calculation AS (
SELECT
DATE(creation_time) as cost_date,
user_email,
project_id,
job_type,
-- Data scan cost (on-demand pricing, $5/TB)
SUM(total_bytes_processed) / 1024 / 1024 / 1024 / 1024 * 5 as scan_cost_usd,
-- Slot cost (Flex slots rate, $0.04 per slot-hour)
SUM(total_slot_ms) / 1000 / 3600 * 0.04 as slot_cost_usd,
-- Storage cost estimate ($0.02/GB written)
-- NOTE(review): `destination_table_bytes_written` does not look like a column of
-- INFORMATION_SCHEMA.JOBS_BY_PROJECT — verify against the current schema; if it
-- is missing the view will fail to create (a billed-bytes column may be intended).
SUM(COALESCE(destination_table_bytes_written, 0)) / 1024 / 1024 / 1024 * 0.02 as storage_cost_usd,
COUNT(*) as job_count,
SUM(total_bytes_processed) / 1024 / 1024 / 1024 as total_gb_processed
FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
WHERE creation_time >= TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 30 DAY)
GROUP BY cost_date, user_email, project_id, job_type
)
SELECT
cost_date,
user_email,
project_id,
job_type,
scan_cost_usd,
slot_cost_usd,
storage_cost_usd,
scan_cost_usd + slot_cost_usd + storage_cost_usd as total_estimated_cost_usd,
job_count,
total_gb_processed,
-- Efficiency metrics (NULLIF guards against division by zero)
ROUND((scan_cost_usd + slot_cost_usd) / NULLIF(job_count, 0), 4) as cost_per_job,
ROUND((scan_cost_usd + slot_cost_usd) / NULLIF(total_gb_processed, 0), 4) as cost_per_gb,
-- Naive monthly projection: this single day's cost extrapolated over 30 days
(scan_cost_usd + slot_cost_usd + storage_cost_usd) * 30 as monthly_projection_usd
FROM cost_calculation
WHERE scan_cost_usd + slot_cost_usd + storage_cost_usd > 0
ORDER BY cost_date DESC, total_estimated_cost_usd DESC;
3.2 비용 이상 감지
-- Flag days whose BigQuery scan cost deviates from the trailing 7-day baseline.
WITH spend_by_day AS (
  -- On-demand scan cost per calendar day ($5/TB) over the last 30 days
  SELECT
    DATE(creation_time) AS cost_date,
    SUM(total_bytes_processed) / 1024 / 1024 / 1024 / 1024 * 5 AS cost_usd
  FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
  WHERE creation_time >= TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 30 DAY)
  GROUP BY cost_date
),
baselined AS (
  -- Attach each day's trailing mean / stddev (previous 7 days, current day excluded)
  SELECT
    cost_date,
    cost_usd,
    AVG(cost_usd) OVER trailing_week AS baseline_avg,
    STDDEV(cost_usd) OVER trailing_week AS baseline_stddev
  FROM spend_by_day
  WINDOW trailing_week AS (
    ORDER BY cost_date
    ROWS BETWEEN 6 PRECEDING AND 1 PRECEDING
  )
)
SELECT
  cost_date,
  ROUND(cost_usd, 2) AS daily_cost_usd,
  ROUND(baseline_avg, 2) AS avg_cost_7days_usd,
  -- Anomaly classification; NULL stddev on the earliest days falls through to NORMAL
  CASE
    WHEN cost_usd > baseline_avg + 2 * baseline_stddev THEN 'ALERT: High Cost'
    WHEN cost_usd > baseline_avg + baseline_stddev THEN 'WARNING: Above Average'
    WHEN cost_usd < baseline_avg - baseline_stddev THEN 'INFO: Below Average'
    ELSE 'NORMAL'
  END AS cost_anomaly_status,
  -- Percent change vs. the baseline (NULL-safe when the baseline is zero)
  ROUND((cost_usd - baseline_avg) / NULLIF(baseline_avg, 0) * 100, 1) AS cost_change_pct
FROM baselined
WHERE cost_date >= DATE_SUB(CURRENT_DATE(), INTERVAL 14 DAY)
ORDER BY cost_date DESC;
4. 슬롯 사용량 모니터링
4.1 실시간 슬롯 모니터링
-- Hourly slot usage analysis over the last 24 hours.
WITH hourly_slots AS (
  SELECT
    DATETIME_TRUNC(DATETIME(start_time), HOUR) AS hour_start,
    job_id,
    user_email,
    total_slot_ms,
    TIMESTAMP_DIFF(end_time, start_time, SECOND) AS duration_seconds,
    -- BUG FIX: sub-second jobs have a 0-second duration, so the plain division
    -- raised a divide-by-zero error; SAFE_DIVIDE returns NULL instead, which the
    -- aggregates below simply ignore.
    SAFE_DIVIDE(total_slot_ms / 1000, TIMESTAMP_DIFF(end_time, start_time, SECOND)) AS avg_slots_used
  FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
  WHERE start_time >= TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 24 HOUR)
    AND job_type = 'QUERY'
    AND state = 'DONE'
    AND total_slot_ms > 0
)
SELECT
  hour_start,
  -- Slot usage statistics
  COUNT(*) AS concurrent_jobs,
  ROUND(SUM(avg_slots_used), 0) AS total_avg_slots,
  ROUND(AVG(avg_slots_used), 0) AS avg_slots_per_job,
  ROUND(MAX(avg_slots_used), 0) AS max_slots_single_job,
  -- Contention classification (thresholds are heuristic; tune to your reservation size)
  CASE
    WHEN SUM(avg_slots_used) > 2000 THEN 'HIGH_CONTENTION'
    WHEN SUM(avg_slots_used) > 1000 THEN 'MEDIUM_CONTENTION'
    ELSE 'LOW_CONTENTION'
  END AS slot_contention_level,
  -- Top slot consumers this hour.
  -- BUG FIX: STRING_AGG(DISTINCT ...) cannot ORDER BY an expression that is not
  -- the aggregated argument; DISTINCT is dropped so ordering by slot usage is valid.
  STRING_AGG(
    CONCAT(user_email, ':', CAST(ROUND(avg_slots_used, 0) AS STRING)),
    ', '
    ORDER BY avg_slots_used DESC
    LIMIT 3
  ) AS top_slot_users
FROM hourly_slots
GROUP BY hour_start
ORDER BY hour_start DESC;
4.2 슬롯 효율성 분석
-- Slot efficiency analysis per user and day (last 7 days; only users with >= 5 queries).
SELECT
user_email,
DATE(creation_time) as usage_date,
COUNT(*) as total_queries,
ROUND(AVG(total_slot_ms / 1000 / 60), 0) as avg_slot_minutes,
ROUND(SUM(total_slot_ms / 1000 / 60 / 60), 2) as total_slot_hours,
-- Efficiency metrics: bytes processed per slot-ms, and wall-clock time per
-- slot-second (NULLIF guards both divisions against zero)
ROUND(
AVG(total_bytes_processed / NULLIF(total_slot_ms, 0) * 1000), 2
) as avg_bytes_per_slot_ms,
ROUND(
AVG(TIMESTAMP_DIFF(end_time, start_time, SECOND) / NULLIF(total_slot_ms / 1000, 0)), 4
) as slot_utilization_ratio,
-- Efficiency grade based on average bytes processed per slot-ms
-- (thresholds are heuristic; calibrate against your own workload)
CASE
WHEN AVG(total_bytes_processed / NULLIF(total_slot_ms, 0) * 1000) > 2000 THEN 'Excellent'
WHEN AVG(total_bytes_processed / NULLIF(total_slot_ms, 0) * 1000) > 1000 THEN 'Good'
WHEN AVG(total_bytes_processed / NULLIF(total_slot_ms, 0) * 1000) > 500 THEN 'Fair'
ELSE 'Needs Optimization'
END as efficiency_grade
FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
WHERE DATE(creation_time) >= DATE_SUB(CURRENT_DATE(), INTERVAL 7 DAY)
AND job_type = 'QUERY'
AND state = 'DONE'
AND total_slot_ms > 0
GROUP BY user_email, DATE(creation_time)
HAVING COUNT(*) >= 5 -- only users with at least 5 queries that day
ORDER BY total_slot_hours DESC;
5. 알림 및 자동화
5.1 비용 알림 시스템
-- Cost threshold check: inserts an alert row into project.monitoring.cost_alerts
-- when today's on-demand scan cost crosses the $50 (WARNING) or $100 (CRITICAL) line.
CREATE OR REPLACE PROCEDURE `project.monitoring.check_cost_thresholds`()
BEGIN
  DECLARE daily_cost FLOAT64;
  DECLARE monthly_projection FLOAT64;
  DECLARE alert_message STRING;

  -- Today's scan cost ($5/TB). COALESCE so a day with no jobs yields 0 rather
  -- than NULL (a NULL would silently skip both threshold branches).
  SET daily_cost = (
    SELECT COALESCE(SUM(total_bytes_processed) / 1024 / 1024 / 1024 / 1024 * 5, 0)
    FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
    WHERE DATE(creation_time) = CURRENT_DATE()
  );
  SET monthly_projection = daily_cost * 30;

  -- BUG FIX: BigQuery CONCAT does not accept FLOAT64 arguments (ROUND returns
  -- FLOAT64), so the numbers are formatted as strings explicitly with FORMAT.
  IF daily_cost > 100 THEN -- $100/day critical threshold
    SET alert_message = CONCAT(
      'CRITICAL: Daily BigQuery cost exceeded $100. Current: $',
      FORMAT('%.2f', daily_cost),
      '. Monthly projection: $',
      FORMAT('%.2f', monthly_projection)
    );
    INSERT INTO `project.monitoring.cost_alerts` (
      alert_timestamp,
      alert_level,
      daily_cost,
      monthly_projection,
      message
    ) VALUES (
      CURRENT_TIMESTAMP(),
      'CRITICAL',
      daily_cost,
      monthly_projection,
      alert_message
    );
  ELSEIF daily_cost > 50 THEN -- $50/day warning threshold
    SET alert_message = CONCAT(
      'WARNING: Daily BigQuery cost exceeded $50. Current: $',
      FORMAT('%.2f', daily_cost)
    );
    INSERT INTO `project.monitoring.cost_alerts` (
      alert_timestamp,
      alert_level,
      daily_cost,
      monthly_projection,
      message
    ) VALUES (
      CURRENT_TIMESTAMP(),
      'WARNING',
      daily_cost,
      monthly_projection,
      alert_message
    );
  END IF;
END;
5.2 성능 알림
-- Performance issue detection: records long-running queries and heavy slot
-- consumers from today into project.monitoring.performance_alerts.
CREATE OR REPLACE PROCEDURE `project.monitoring.detect_performance_issues`()
BEGIN
  -- Long-running query detection (wall clock > 1 hour).
  -- BUG FIX: BigQuery CONCAT does not accept FLOAT64 arguments, so the
  -- minutes value is formatted as a string with FORMAT.
  INSERT INTO `project.monitoring.performance_alerts` (
    alert_timestamp,
    alert_type,
    job_id,
    user_email,
    duration_minutes,
    message
  )
  SELECT
    CURRENT_TIMESTAMP(),
    'LONG_RUNNING_QUERY',
    job_id,
    user_email,
    TIMESTAMP_DIFF(end_time, start_time, SECOND) / 60,
    CONCAT(
      'Query ran for ',
      FORMAT('%.1f', TIMESTAMP_DIFF(end_time, start_time, SECOND) / 60),
      ' minutes. Job ID: ', job_id
    )
  FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
  WHERE DATE(creation_time) = CURRENT_DATE()
    AND TIMESTAMP_DIFF(end_time, start_time, SECOND) > 3600 -- longer than 1 hour
    AND job_type = 'QUERY'
    AND state = 'DONE';

  -- High slot usage detection (> 2 slot-hours, i.e. 7,200,000 slot-ms).
  -- Same CONCAT-with-FLOAT64 fix applies to the message below.
  INSERT INTO `project.monitoring.performance_alerts` (
    alert_timestamp,
    alert_type,
    job_id,
    user_email,
    slot_hours,
    message
  )
  SELECT
    CURRENT_TIMESTAMP(),
    'HIGH_SLOT_USAGE',
    job_id,
    user_email,
    total_slot_ms / 1000 / 60 / 60,
    CONCAT(
      'Query used ',
      FORMAT('%.1f', total_slot_ms / 1000 / 60 / 60),
      ' slot hours. Consider optimization.'
    )
  FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
  WHERE DATE(creation_time) = CURRENT_DATE()
    AND total_slot_ms > 7200000 -- more than 2 slot-hours consumed
    AND job_type = 'QUERY'
    AND state = 'DONE';
END;
6. 대시보드 구축
6.1 실시간 모니터링 대시보드
-- Real-time BigQuery monitoring dashboard view: combines currently-running job
-- stats, today's totals, an hourly trend array, and the top 10 users by data scanned.
CREATE OR REPLACE VIEW `project.monitoring.realtime_dashboard` AS
WITH current_stats AS (
-- Jobs in the RUNNING state right now.
-- NOTE(review): no creation_time filter here — this scans the full retention
-- window of JOBS_BY_PROJECT on every dashboard refresh; consider adding one.
SELECT
COUNT(*) as running_jobs,
COUNT(DISTINCT user_email) as active_users,
SUM(total_slot_ms) / 1000 / 60 / 60 as current_slot_hours,
SUM(total_bytes_processed) / 1024 / 1024 / 1024 as current_gb_processed
FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
WHERE state = 'RUNNING'
),
today_stats AS (
-- Aggregate totals for today's query jobs (cost estimate at $5/TB scanned)
SELECT
COUNT(*) as todays_jobs,
COUNT(CASE WHEN error_result IS NOT NULL THEN 1 END) as failed_jobs,
SUM(total_bytes_processed) / 1024 / 1024 / 1024 / 1024 * 5 as estimated_cost_today,
AVG(TIMESTAMP_DIFF(end_time, start_time, SECOND)) as avg_duration_seconds
FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
WHERE DATE(creation_time) = CURRENT_DATE()
AND job_type = 'QUERY'
),
hourly_trend AS (
-- Today's job count and GB scanned bucketed by hour of creation
SELECT
EXTRACT(HOUR FROM creation_time) as hour,
COUNT(*) as hourly_jobs,
SUM(total_bytes_processed) / 1024 / 1024 / 1024 as hourly_gb
FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
WHERE DATE(creation_time) = CURRENT_DATE()
AND job_type = 'QUERY'
GROUP BY hour
),
top_users_today AS (
-- Ten heaviest users today, ranked by GB of data scanned
SELECT
user_email,
COUNT(*) as user_jobs,
SUM(total_bytes_processed) / 1024 / 1024 / 1024 as user_gb_processed
FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
WHERE DATE(creation_time) = CURRENT_DATE()
AND job_type = 'QUERY'
GROUP BY user_email
ORDER BY user_gb_processed DESC
LIMIT 10
)
SELECT
CURRENT_TIMESTAMP() as dashboard_updated,
-- Current state
cs.running_jobs,
cs.active_users,
ROUND(cs.current_slot_hours, 2) as current_slot_hours,
ROUND(cs.current_gb_processed, 2) as current_gb_processed,
-- Today's totals
ts.todays_jobs,
ts.failed_jobs,
ROUND(ts.estimated_cost_today, 2) as estimated_cost_today_usd,
ROUND(ts.avg_duration_seconds / 60, 1) as avg_duration_minutes,
-- Success rate (NULLIF guards the no-jobs case)
ROUND((ts.todays_jobs - ts.failed_jobs) / NULLIF(ts.todays_jobs, 0) * 100, 1) as success_rate_pct,
-- Hourly trend, packed into an ARRAY of STRUCTs for the dashboard client
ARRAY(
SELECT STRUCT(hour, hourly_jobs, ROUND(hourly_gb, 1) as hourly_gb)
FROM hourly_trend
ORDER BY hour
) as hourly_trends,
-- Top users, likewise as an ARRAY of STRUCTs
ARRAY(
SELECT STRUCT(user_email, user_jobs, ROUND(user_gb_processed, 1) as gb_processed)
FROM top_users_today
) as top_users
FROM current_stats cs
CROSS JOIN today_stats ts;
6.2 주간 성능 리포트
-- Weekly performance & cost report: rebuilds project.monitoring.weekly_reports
-- from the last 8 weeks of query jobs, then logs the generation event.
CREATE OR REPLACE PROCEDURE `project.monitoring.generate_weekly_report`()
BEGIN
  CREATE OR REPLACE TABLE `project.monitoring.weekly_reports` AS
  WITH weekly_summary AS (
    SELECT
      DATE_TRUNC(DATE(creation_time), WEEK) AS week_start,
      -- Volume metrics
      COUNT(*) AS total_queries,
      COUNT(DISTINCT user_email) AS unique_users,
      COUNTIF(error_result IS NOT NULL) AS failed_queries,
      -- Performance metrics.
      -- BUG FIX: PERCENTILE_CONT is analytic-only and cannot reference a
      -- non-grouped column in a GROUP BY query; APPROX_QUANTILES is the
      -- aggregate equivalent (approximate percentile).
      ROUND(AVG(TIMESTAMP_DIFF(end_time, start_time, SECOND)), 1) AS avg_duration_seconds,
      ROUND(APPROX_QUANTILES(TIMESTAMP_DIFF(end_time, start_time, SECOND), 100)[OFFSET(95)], 1) AS p95_duration_seconds,
      -- Resource usage
      ROUND(SUM(total_bytes_processed) / 1024 / 1024 / 1024, 1) AS total_gb_processed,
      ROUND(SUM(total_slot_ms) / 1000 / 60 / 60, 1) AS total_slot_hours,
      -- Cost estimate ($5/TB on-demand scan pricing)
      ROUND(SUM(total_bytes_processed) / 1024 / 1024 / 1024 / 1024 * 5, 2) AS estimated_cost_usd,
      -- Week-over-week comparisons (analytic functions over grouped results are valid)
      LAG(COUNT(*)) OVER (ORDER BY DATE_TRUNC(DATE(creation_time), WEEK)) AS prev_week_queries,
      LAG(SUM(total_bytes_processed) / 1024 / 1024 / 1024) OVER (ORDER BY DATE_TRUNC(DATE(creation_time), WEEK)) AS prev_week_gb
    FROM `project.region-us.INFORMATION_SCHEMA.JOBS_BY_PROJECT`
    WHERE DATE(creation_time) >= DATE_SUB(CURRENT_DATE(), INTERVAL 8 WEEK)
      AND job_type = 'QUERY'
    GROUP BY week_start
  )
  SELECT
    week_start,
    total_queries,
    unique_users,
    failed_queries,
    ROUND(failed_queries / total_queries * 100, 2) AS failure_rate_pct,
    avg_duration_seconds,
    p95_duration_seconds,
    total_gb_processed,
    total_slot_hours,
    estimated_cost_usd,
    -- Week-over-week growth; NULLIF guards division when the prior week had 0 queries
    CASE
      WHEN prev_week_queries IS NULL THEN 'N/A'
      ELSE CONCAT(
        CAST(ROUND((total_queries - prev_week_queries) / NULLIF(prev_week_queries, 0) * 100, 1) AS STRING),
        '%'
      )
    END AS query_growth_pct,
    CASE
      WHEN prev_week_gb IS NULL THEN 'N/A'
      ELSE CONCAT(
        CAST(ROUND((total_gb_processed - prev_week_gb) / NULLIF(prev_week_gb, 0) * 100, 1) AS STRING),
        '%'
      )
    END AS data_growth_pct,
    CURRENT_TIMESTAMP() AS report_generated_at
  FROM weekly_summary
  ORDER BY week_start DESC;

  -- Log the report generation
  INSERT INTO `project.monitoring.report_log` (
    report_type,
    report_date,
    generated_at
  ) VALUES (
    'WEEKLY_PERFORMANCE',
    CURRENT_DATE(),
    CURRENT_TIMESTAMP()
  );
END;
BigQuery 모니터링을 통해 시스템의 성능, 비용, 사용량을 체계적으로 관리할 수 있습니다. 정기적인 모니터링과 알림 시스템을 구축하여 최적의 BigQuery 운영 환경을 만들어보세요.