from fastapi import APIRouter, HTTPException, UploadFile, File
from fastapi.responses import Response
import os
import base64
import struct
import uuid
import datetime
import httpx

from openai import OpenAI
from google import genai
from google.genai import types

from schemas import RobotRequest
from config import logger, openai_api_key
import db_module

router = APIRouter()

# Clients
openai_client = OpenAI(api_key=openai_api_key)
gemini_client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))

# -----------------------------
# Utils
# -----------------------------
def get_current_season_and_time():
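    """Return the current date/time in JST plus Japanese season and time-of-day labels."""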
    jst = datetime.timezone(datetime.timedelta(hours=9), "JST")
    now = datetime.datetime.now(jst)
    month = now.month
    hour = now.hour

    if 3 <= month <= 5:
        season = "春"
    elif 6 <= month <= 8:
        season = "夏"
    elif 9 <= month <= 11:
        season = "秋"
    else:
        season = "冬"

    if 5 <= hour < 11:
        time_greeting = "朝"
    elif 11 <= hour < 17:
        time_greeting = "昼"
    else:
        time_greeting = "夜"

    return {
        "date": now.strftime("%Y年%m月%d日"),
        "time": now.strftime("%H:%M"),
        "season": season,
        "time_greeting": time_greeting,
    }


async def get_current_weather():
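    """Fetch the current weather for Sapporo from OpenWeather; fall back to a fixed Japanese string if unavailable."""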
    api_key = os.environ.get("OPENWEATHER_API_KEY")
    city_name = "Sapporo"
    if not api_key:
        logger.warning("OPENWEATHER_API_KEY가 없습니다. 더미 날씨를 반환합니다.")
        return "晴れ、気温0度"

    url = "https://api.openweathermap.org/data/2.5/weather"
    params = {"q": city_name, "appid": api_key, "units": "metric", "lang": "ja"}

    try:
        async with httpx.AsyncClient() as client_http:
            res = await client_http.get(url, params=params)
            res.raise_for_status()
            data = res.json()
            description = data["weather"][0]["description"]
            temp = round(data["main"]["temp"])
            return f"{description}、気温{temp}度"
    except Exception as e:
        logger.error(f"날씨 API 오류: {e}")
        return "天気情報が取得できませんでした"


def pcm_to_wav(pcm_data: bytes, sample_rate: int = 24000, channels: int = 1, bits_per_sample: int = 16) -> bytes:
    """PCM16 → WAV bytes (44byte header + pcm)"""
    byte_rate = sample_rate * channels * bits_per_sample // 8
    block_align = channels * bits_per_sample // 8
    data_size = len(pcm_data)

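    # Standard 44-byte RIFF/WAVE header: "RIFF" id + chunk size, "WAVE" tag,
    # "fmt " sub-chunk (PCM format = 1, channels, sample rate, byte rate,
    # block align, bits per sample), then the "data" sub-chunk id and size.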
    wav_header = struct.pack(
        "<4sI4s4sIHHIIHH4sI",
        b"RIFF",
        36 + data_size,
        b"WAVE",
        b"fmt ",
        16,
        1,
        channels,
        sample_rate,
        byte_rate,
        block_align,
        bits_per_sample,
        b"data",
        data_size,
    )
    return wav_header + pcm_data


async def synthesize_wav_gemini(text: str, voice_name: str = "Aoede") -> bytes:
    """
    Gemini TTS → PCM (24kHz) → WAV bytes
    """
    if not text or not text.strip():
        raise HTTPException(status_code=400, detail="Text is required")

    t0 = datetime.datetime.now()

    # The prompt can be lightly augmented to steer the speaking tone.
    prompt_text = f"Read aloud in a warm, welcoming tone.\n{text}"

    resp = gemini_client.models.generate_content(
        model="gemini-2.5-flash-preview-tts",
        contents=prompt_text,
        config=types.GenerateContentConfig(
            response_modalities=["AUDIO"],
            speech_config=types.SpeechConfig(
                voice_config=types.VoiceConfig(
                    prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name=voice_name)
                )
            ),
        ),
    )

    # Defensive checks to prevent "'NoneType' object has no attribute 'parts'" errors
    if not getattr(resp, "candidates", None):
        raise HTTPException(status_code=500, detail="Empty TTS response (no candidates)")

    cand0 = resp.candidates[0]
    content = getattr(cand0, "content", None)
    if not content or not getattr(content, "parts", None):
        raise HTTPException(status_code=500, detail="Empty TTS response (no parts)")

    part0 = content.parts[0]
    inline_data = getattr(part0, "inline_data", None)
    if not inline_data or not hasattr(inline_data, "data"):
        raise HTTPException(status_code=500, detail="Empty TTS response (no inline audio data)")

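    # inline_data.data may be raw PCM bytes or a base64-encoded string depending
    # on the SDK/transport, so handle both cases.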
    audio_data = inline_data.data
    pcm_bytes = audio_data if isinstance(audio_data, (bytes, bytearray)) else base64.b64decode(audio_data)

    wav_bytes = pcm_to_wav(pcm_bytes, sample_rate=24000, channels=1, bits_per_sample=16)

    t1 = datetime.datetime.now()
    logger.info(f"[TTS] Gemini->WAV done. chars={len(text)} wav_bytes={len(wav_bytes)} time={(t1-t0).total_seconds():.2f}s")

    return wav_bytes


# -----------------------------
# Health
# -----------------------------
@router.get("/health")
async def health_check():
    return {"status": "healthy"}


# -----------------------------
# STT (file upload) - kept as-is
# -----------------------------
@router.post("/sanwa/gpt-backup/stt-from-file")
async def process_stt_from_file(file: UploadFile = File(...)):
    try:
        if not file:
            raise HTTPException(status_code=400, detail="오디오 파일이 없습니다.")
        logger.info(f"STT 파일 수신: {file.filename}, type={file.content_type}")

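        # The OpenAI SDK accepts the upload as a (filename, file-like object, content type) tuple.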
        transcription_response = openai_client.audio.transcriptions.create(
            model="whisper-1",
            file=(file.filename, file.file, file.content_type),
            language="ja",
        )
        transcribed_text = transcription_response.text
        logger.info(f"Whisper STT 변환 결과: {transcribed_text}")
        return {"status": "success", "text": transcribed_text}

    except Exception as e:
        logger.error(f"STT 처리 오류: {e}")
        raise HTTPException(status_code=500, detail=f"STT 변환 실패: {e}")


# -----------------------------
# Robot multi-turn (returns WAV directly)
# -----------------------------
FIRST_AI_PROMPT = "良い一日をお過ごしください。お名前は何ですか？"

@router.post("/sanwa/gpt-backup/process-text")
async def process_robot_multi_turn(request: RobotRequest):
    """
    - Generate the GPT response
    - Save the exchange to the DB
    - Synthesize the response as WAV via TTS and return the binary (audio/wav) directly
    """
    user_text = request.user_input
    session_id = request.session_id
    is_last_turn = request.is_last_turn

    logger.info(f"로봇 멀티턴 요청 수신: session_id={session_id}, input={user_text}")

    try:
        # 1) If there is no session ID, create one and store the first assistant utterance
        if not session_id:
            session_id = f"robot-{uuid.uuid4()}"
            logger.info(f"새 세션 생성: {session_id}")
            await db_module.save_robot_chat_message(session_id, "assistant", FIRST_AI_PROMPT)

        # 2) Save the user utterance
        await db_module.save_robot_chat_message(session_id, "user", user_text)

        # 3) Load the conversation history
        history_rows = await db_module.get_robot_chat_history(session_id)
        history_messages = [{"role": row["role"], "content": row["content"]} for row in history_rows]

        # 4) Current context (date/time/season/weather)
        context = get_current_season_and_time()
        weather = await get_current_weather()

        # 5) System prompt
        system_prompt = f"""
# あなたの役割
あなたは、キャンパスに展示されている、親しみやすく知的な対話型ロボットです。
あなたの主な目的は、学生、教職員、訪問者と自然で、役に立ち、時宜にかなった（その時々に合った）会話を行うことです。

# 会話のルール
1. 常に礼儀正しく、親しみやすいトーンで応答してください。
2. 質問に対して、単に情報で答えるだけでなく、会話を広げるような（キャッチボールするような）応答を心がけてください。
3. 以下の「現在のコンテキスト」情報を、会話の中に自然に織り込んでください。

# 現在のコンテキスト
* 現在の日付: {context['date']}
* 現在の時刻: {context['time']} ({context['time_greeting']})
* 現在の季節: {context['season']}
* 今日の天気: {weather}
* あなたの場所: 大学 エントランス
"""

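        # On the last turn, append an extra instruction so the reply closes with the exact phrase「お元気でね」.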
        if is_last_turn:
            system_prompt += """
# 最終応答の指示
会話の最後のターンです。相手の言葉に共感・同意した後、応答の最後は必ず「お元気でね」で締めくくってください。
※重要: 言い換えず、一字一句正確に「お元気でね」を使用してください。
"""

        messages_for_gpt = [{"role": "system", "content": system_prompt}] + history_messages

        # 6) Call GPT
        chat_response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=messages_for_gpt,
            max_tokens=150,
        )
        gpt_response_text = chat_response.choices[0].message.content
        logger.info(f"GPT 응답: {gpt_response_text}")

        # 7) Save the GPT response
        await db_module.save_robot_chat_message(session_id, "assistant", gpt_response_text)

        # 8) Generate TTS (WAV)
        wav_bytes = await synthesize_wav_gemini(gpt_response_text, voice_name="Aoede")

        # 9) Return the WAV binary directly; put the session ID in a response header so the robot can read it
        return Response(
            content=wav_bytes,
            media_type="audio/wav",
            headers={
                "Cache-Control": "no-cache",
                "X-Session-Id": session_id,
            },
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"process-text 처리 중 오류: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"서버 내부 오류: {e}")
