#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ 測試報告生成器 ============== 生成詳細的 MCP 測試報告,支持多種格式輸出。 """ import json import time from datetime import datetime, timedelta from typing import Dict, Any, List, Optional from pathlib import Path from dataclasses import dataclass, asdict from .config import TestConfig, DEFAULT_CONFIG from .utils import TestUtils from .validators import TestValidators, ValidationResult from ..debug import debug_log @dataclass class TestReport: """測試報告數據結構""" timestamp: str duration: float total_scenarios: int passed_scenarios: int failed_scenarios: int success_rate: float scenarios: List[Dict[str, Any]] validation_summary: Dict[str, Any] performance_summary: Dict[str, Any] system_info: Dict[str, Any] config: Dict[str, Any] errors: List[str] warnings: List[str] class TestReporter: """測試報告生成器""" def __init__(self, config: Optional[TestConfig] = None): self.config = config or DEFAULT_CONFIG self.validators = TestValidators(config) def generate_report(self, test_results: Dict[str, Any]) -> TestReport: """生成測試報告""" start_time = time.time() # 提取基本信息 scenarios = test_results.get("results", []) total_scenarios = test_results.get("total_scenarios", len(scenarios)) passed_scenarios = test_results.get("passed_scenarios", 0) failed_scenarios = test_results.get("failed_scenarios", 0) # 計算成功率 success_rate = passed_scenarios / total_scenarios if total_scenarios > 0 else 0 # 驗證測試結果 validation_results = {} for i, scenario in enumerate(scenarios): validation_results[f"scenario_{i}"] = self.validators.result_validator.validate_test_result(scenario) validation_summary = self.validators.get_validation_summary(validation_results) # 生成性能摘要 performance_summary = self._generate_performance_summary(scenarios) # 收集錯誤和警告 all_errors = [] all_warnings = [] for scenario in scenarios: all_errors.extend(scenario.get("errors", [])) # 計算總持續時間 total_duration = 0 for scenario in scenarios: perf = scenario.get("performance", {}) duration = perf.get("total_duration", 0) or perf.get("total_time", 0) total_duration += duration # 創建報告 report = TestReport( timestamp=datetime.now().isoformat(), duration=total_duration, total_scenarios=total_scenarios, passed_scenarios=passed_scenarios, failed_scenarios=failed_scenarios, success_rate=success_rate, scenarios=scenarios, validation_summary=validation_summary, performance_summary=performance_summary, system_info=TestUtils.get_system_info(), config=self.config.to_dict(), errors=all_errors, warnings=all_warnings ) debug_log(f"📊 測試報告生成完成 (耗時: {time.time() - start_time:.2f}s)") return report def _generate_performance_summary(self, scenarios: List[Dict[str, Any]]) -> Dict[str, Any]: """生成性能摘要""" total_duration = 0 min_duration = float('inf') max_duration = 0 durations = [] memory_usage = [] for scenario in scenarios: perf = scenario.get("performance", {}) # 處理持續時間 duration = perf.get("total_duration", 0) or perf.get("total_time", 0) if duration > 0: total_duration += duration min_duration = min(min_duration, duration) max_duration = max(max_duration, duration) durations.append(duration) # 處理內存使用 memory_diff = perf.get("memory_diff", {}) if memory_diff: memory_usage.append(memory_diff) # 計算平均值 avg_duration = total_duration / len(durations) if durations else 0 # 計算中位數 if durations: sorted_durations = sorted(durations) n = len(sorted_durations) median_duration = ( sorted_durations[n // 2] if n % 2 == 1 else (sorted_durations[n // 2 - 1] + sorted_durations[n // 2]) / 2 ) else: median_duration = 0 return { "total_duration": total_duration, "total_duration_formatted": TestUtils.format_duration(total_duration), "avg_duration": avg_duration, "avg_duration_formatted": TestUtils.format_duration(avg_duration), "median_duration": median_duration, "median_duration_formatted": TestUtils.format_duration(median_duration), "min_duration": min_duration if min_duration != float('inf') else 0, "min_duration_formatted": TestUtils.format_duration(min_duration if min_duration != float('inf') else 0), "max_duration": max_duration, "max_duration_formatted": TestUtils.format_duration(max_duration), "scenarios_with_performance": len(durations), "memory_usage_samples": len(memory_usage) } def save_report(self, report: TestReport, output_path: Optional[Path] = None) -> Path: """保存測試報告""" if output_path is None: output_dir = self.config.ensure_report_dir() timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") filename = f"mcp_test_report_{timestamp}.{self.config.report_format}" output_path = output_dir / filename output_path.parent.mkdir(parents=True, exist_ok=True) if self.config.report_format.lower() == "json": self._save_json_report(report, output_path) elif self.config.report_format.lower() == "html": self._save_html_report(report, output_path) elif self.config.report_format.lower() == "markdown": self._save_markdown_report(report, output_path) else: raise ValueError(f"不支持的報告格式: {self.config.report_format}") debug_log(f"📄 測試報告已保存: {output_path}") return output_path def _save_json_report(self, report: TestReport, output_path: Path): """保存 JSON 格式報告""" with open(output_path, 'w', encoding='utf-8') as f: json.dump(asdict(report), f, indent=2, ensure_ascii=False, default=str) def _save_html_report(self, report: TestReport, output_path: Path): """保存 HTML 格式報告""" html_content = self._generate_html_report(report) with open(output_path, 'w', encoding='utf-8') as f: f.write(html_content) def _save_markdown_report(self, report: TestReport, output_path: Path): """保存 Markdown 格式報告""" markdown_content = self._generate_markdown_report(report) with open(output_path, 'w', encoding='utf-8') as f: f.write(markdown_content) def _generate_html_report(self, report: TestReport) -> str: """生成 HTML 報告""" # 狀態圖標 status_icon = "✅" if report.success_rate == 1.0 else "❌" if report.success_rate == 0 else "⚠️" # 性能圖表數據(簡化版) scenario_names = [s.get("scenario_name", f"Scenario {i}") for i, s in enumerate(report.scenarios)] scenario_durations = [] for s in report.scenarios: perf = s.get("performance", {}) duration = perf.get("total_duration", 0) or perf.get("total_time", 0) scenario_durations.append(duration) html = f""" MCP 測試報告

🧪 MCP 測試報告

{status_icon} 測試完成

生成時間: {report.timestamp}

總測試數

{report.total_scenarios}

通過測試

{report.passed_scenarios}

失敗測試

{report.failed_scenarios}

成功率

{report.success_rate:.1%}

總耗時

{report.performance_summary.get('total_duration_formatted', 'N/A')}

平均耗時

{report.performance_summary.get('avg_duration_formatted', 'N/A')}

📋 測試場景詳情

""" for i, scenario in enumerate(report.scenarios): success = scenario.get("success", False) scenario_name = scenario.get("scenario_name", f"Scenario {i+1}") scenario_desc = scenario.get("scenario_description", "無描述") perf = scenario.get("performance", {}) duration = perf.get("total_duration", 0) or perf.get("total_time", 0) duration_str = TestUtils.format_duration(duration) if duration > 0 else "N/A" steps = scenario.get("steps", {}) completed_steps = sum(1 for v in steps.values() if v) total_steps = len(steps) errors = scenario.get("errors", []) html += f"""

{'✅' if success else '❌'} {scenario_name}

{scenario_desc}

狀態: {'通過' if success else '失敗'}
耗時: {duration_str}
完成步驟: {completed_steps}/{total_steps}
錯誤數: {len(errors)}
""" if errors: html += '
錯誤信息:
    ' for error in errors: html += f'
  • {error}
  • ' html += '
' html += '
' html += f"""

📊 性能統計

最快測試

{report.performance_summary.get('min_duration_formatted', 'N/A')}

最慢測試

{report.performance_summary.get('max_duration_formatted', 'N/A')}

中位數

{report.performance_summary.get('median_duration_formatted', 'N/A')}
""" return html def _generate_markdown_report(self, report: TestReport) -> str: """生成 Markdown 報告""" status_icon = "✅" if report.success_rate == 1.0 else "❌" if report.success_rate == 0 else "⚠️" md = f"""# 🧪 MCP 測試報告 {status_icon} **測試狀態**: {'全部通過' if report.success_rate == 1.0 else '部分失敗' if report.success_rate > 0 else '全部失敗'} **生成時間**: {report.timestamp} ## 📊 測試摘要 | 指標 | 數值 | |------|------| | 總測試數 | {report.total_scenarios} | | 通過測試 | {report.passed_scenarios} | | 失敗測試 | {report.failed_scenarios} | | 成功率 | {report.success_rate:.1%} | | 總耗時 | {report.performance_summary.get('total_duration_formatted', 'N/A')} | | 平均耗時 | {report.performance_summary.get('avg_duration_formatted', 'N/A')} | ## 📋 測試場景詳情 """ for i, scenario in enumerate(report.scenarios): success = scenario.get("success", False) scenario_name = scenario.get("scenario_name", f"Scenario {i+1}") scenario_desc = scenario.get("scenario_description", "無描述") perf = scenario.get("performance", {}) duration = perf.get("total_duration", 0) or perf.get("total_time", 0) duration_str = TestUtils.format_duration(duration) if duration > 0 else "N/A" steps = scenario.get("steps", {}) completed_steps = sum(1 for v in steps.values() if v) total_steps = len(steps) errors = scenario.get("errors", []) md += f"""### {'✅' if success else '❌'} {scenario_name} **描述**: {scenario_desc} - **狀態**: {'通過' if success else '失敗'} - **耗時**: {duration_str} - **完成步驟**: {completed_steps}/{total_steps} - **錯誤數**: {len(errors)} """ if errors: md += "**錯誤信息**:\n" for error in errors: md += f"- {error}\n" md += "\n" md += f"""## 📊 性能統計 | 指標 | 數值 | |------|------| | 最快測試 | {report.performance_summary.get('min_duration_formatted', 'N/A')} | | 最慢測試 | {report.performance_summary.get('max_duration_formatted', 'N/A')} | | 中位數 | {report.performance_summary.get('median_duration_formatted', 'N/A')} | ## 🔧 系統信息 | 項目 | 值 | |------|---| | CPU 核心數 | {report.system_info.get('cpu_count', 'N/A')} | | 總內存 | {report.system_info.get('memory_total', 'N/A')} | | 可用內存 | {report.system_info.get('memory_available', 'N/A')} | --- *報告由 MCP Feedback Enhanced 測試框架生成 | {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}* """ return md def print_summary(self, report: TestReport): """打印測試摘要到控制台""" status_icon = "✅" if report.success_rate == 1.0 else "❌" if report.success_rate == 0 else "⚠️" print("\n" + "="*60) print(f"🧪 MCP 測試報告摘要 {status_icon}") print("="*60) print(f"📊 總測試數: {report.total_scenarios}") print(f"✅ 通過測試: {report.passed_scenarios}") print(f"❌ 失敗測試: {report.failed_scenarios}") print(f"📈 成功率: {report.success_rate:.1%}") print(f"⏱️ 總耗時: {report.performance_summary.get('total_duration_formatted', 'N/A')}") print(f"⚡ 平均耗時: {report.performance_summary.get('avg_duration_formatted', 'N/A')}") if report.errors: print(f"\n❌ 發現 {len(report.errors)} 個錯誤:") for error in report.errors[:5]: # 只顯示前5個錯誤 print(f" • {error}") if len(report.errors) > 5: print(f" ... 還有 {len(report.errors) - 5} 個錯誤") print("="*60)