#!/usr/bin/env python3
"""Generate sheet_staging.json from content_data_v2.json"""
import json, sys, os
from pathlib import Path

# --- Input resolution -------------------------------------------------------
# Cycle id comes from argv[1]; otherwise fall back to the most recent cycle.
ROOT = Path("/Users/bryce/FLSM")
DEFAULT_CYCLE = "lasting-language-therapy-2026-04-14"
cycle_id = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_CYCLE
brief = ROOT / ".tmp/briefs" / cycle_id
data_file = brief / "content_data_v2.json"

# Load the content payload produced by the earlier pipeline step.
d = json.loads(data_file.read_text())

# Top-level sections of the payload (all optional, with safe defaults).
pieces = d.get("pieces", [])
welcome = d.get("welcome_sequence", {})
client_id = d.get("client_id", "lasting-language")

# Row accumulators, one list per staging-sheet tab.
scripts_rows, schedule_rows, blog_rows, nurture_rows = [], [], [], []
blog_n = 0  # running count of blog pieces; numbers the header images

# Flatten every content piece into staging-sheet rows.
# Row shapes (column order matters to the sheet writer downstream):
#   scripts_rows:  [title, type, platform(s), body, caption, status, notes, image]
#   schedule_rows: [week label, day, title, platform(s)]
#   blog_rows:     [title, "blog", status, "", meta description, keyword, est. publish date]
#   nurture_rows:  [subject, preview, body html, date, status, sequence name]
for p in pieces:
    pid = p.get("id", "")
    ptype = p.get("type", "")
    week = p.get("week", 1)
    day = p.get("day", "")
    date = p.get("date", "")
    angle = p.get("angle", "")
    pillar = p.get("pillar", "")
    notes = f"{angle} | {pillar}"

    if ptype == "youtube":
        s = p.get("script", {})
        # FIX: the original joined filter(None, [f"HOOK:\n{...}", ...]),
        # but each f-string already contains its label so it is never
        # falsy and filter() dropped nothing; empty sections showed up as
        # bare "LABEL:" headings. Filter on the section *content* instead.
        sections = [
            ("HOOK", s.get("hook", "")),
            ("OPEN", s.get("open", "")),
            ("EPIPHANY OPEN", s.get("epiphany_open", "")),
            ("CORE CONTENT", s.get("core_content", "")),
            ("PROOF BRIDGE", s.get("proof_bridge", "")),
            ("OFFER CLOSE", s.get("offer_close", "")),
        ]
        script_text = "\n\n".join(
            f"{label}:\n{text}" for label, text in sections if text
        )
        caption = s.get("youtube_description", "")
        title = p.get("title", f"YouTube {pid}")
        mode = p.get("mode", "TEACH")
        scripts_rows.append([title, "youtube", "YouTube", script_text, caption, "Draft", f"{notes} | {mode}", ""])
        schedule_rows.append([f"Week {week}", day, title, "YouTube"])

    elif ptype == "short":
        # A short carries one clip per platform; stack them in one row.
        clips = p.get("clips", [])
        platforms = " / ".join([c.get("platform", "") for c in clips])
        parts = [f"--- {c.get('platform','').upper()} ({c.get('duration','')}) ---\n{c.get('script','')}" for c in clips]
        captions = [f"--- {c.get('platform','').upper()} ---\n{c.get('caption','')}" for c in clips]
        parent = p.get("parent", "")
        title = f"Shorts from {parent.upper()}"
        scripts_rows.append([title, "short", platforms, "\n\n".join(parts), "\n\n".join(captions), "Draft", notes, ""])
        schedule_rows.append([f"Week {week}", day, title, platforms])

    elif ptype == "linkedin_carousel":
        slides = p.get("slides", [])
        body = "\n\n".join([f"Slide {s.get('num',i+1)}: {s.get('headline','')}\n{s.get('body','')}" for i, s in enumerate(slides)])
        caption = p.get("caption", "")
        fc = p.get("first_comment_engagement", "")
        if fc:  # first-comment copy rides along inside the caption cell
            caption += f"\n\n---FIRST COMMENT---\n{fc}"
        title = p.get("title", f"LinkedIn Carousel {pid}")
        scripts_rows.append([title, "linkedin_carousel", "LinkedIn", body, caption, "Draft", notes, ""])
        schedule_rows.append([f"Week {week}", day, title, "LinkedIn"])

    elif ptype == "igtt_carousel":
        # One piece fans out into separate Instagram and TikTok script rows
        # but shares a single schedule row.
        ig = p.get("instagram", {})
        tt = p.get("tiktok", {})
        ig_slides = ig.get("slides", p.get("slides", []))
        tt_slides = tt.get("slides", [])
        ig_text = "\n\n".join([f"IG Slide {s.get('num',i+1)}: {s.get('headline','')}\n{s.get('body','')}" for i, s in enumerate(ig_slides)])
        tt_text = "\n\n".join([f"TT Slide {s.get('num',i+1)}: {s.get('text', s.get('headline',''))}" for i, s in enumerate(tt_slides)])
        # CONSISTENCY FIX: only append the save-CTA marker when a CTA
        # exists (mirrors the guarded first-comment handling above); the
        # original always emitted a dangling "---SAVE CTA---" separator.
        ig_cap = ig.get("caption", "")
        save_cta = ig.get("cta", "")
        if save_cta:
            ig_cap += "\n---SAVE CTA---\n" + save_cta
        tt_cap = tt.get("caption", "")
        title_ig = p.get("title", f"IG/TT Carousel {pid}")
        scripts_rows.append([title_ig, "ig_carousel", "Instagram", ig_text, ig_cap, "Draft", notes, ""])
        scripts_rows.append([f"TikTok: {title_ig}", "tiktok_carousel", "TikTok", tt_text, tt_cap, "Draft", notes, ""])
        schedule_rows.append([f"Week {week}", day, title_ig, "Instagram, TikTok"])

    elif ptype == "blog":
        blog_n += 1
        title = p.get("title", "")
        body = p.get("body", "")
        keyword = p.get("keyword", "")
        meta = p.get("meta_description", "")
        li_share = p.get("linkedin_share", "")
        blog_rows.append([title, "blog", "Draft", "", meta, keyword, date])
        # Header image path is numbered by blog order within the cycle.
        img = f"images/blog_{blog_n}_header.jpg"
        scripts_rows.append([title, "blog", "Blog", body, li_share, "Draft", f"{notes} | kw: {keyword}", img])
        schedule_rows.append([f"Week {week}", day, title, "Blog"])

    elif ptype == "email":
        # Prefer "subject"/"body_html"; fall back to the alternate keys
        # ("subject_line"/"body") that some payloads carry instead.
        subj = p.get("subject", p.get("subject_line", ""))
        preview = p.get("preview_text", "")
        body_html = p.get("body_html", p.get("body", ""))
        nurture_rows.append([subj, preview, body_html, date, "Draft", "Nurture"])
        scripts_rows.append([subj, "email", "Email", body_html, f"Subject: {subj} | Preview: {preview}", "Draft", notes, ""])
        schedule_rows.append([f"Week {week}", day, subj, "Email"])

# Append the evergreen welcome-sequence emails after the cycle content.
for position, email in enumerate(welcome.get("emails", []), 1):
    # Prefer "send_day"; fall back to "day", then to a positional label.
    send_day = email.get("send_day", email.get("day", f"Day {position}"))
    nurture_rows.append([
        email.get("subject", ""),
        email.get("preview_text", ""),
        email.get("body_html", email.get("body", "")),
        str(send_day),
        "Draft",
        "Welcome",
    ])

# Assemble the staging payload and write it next to the input file.
staging = {
    "cycle_id": cycle_id,
    "client_id": client_id,
    "scripts_rows": scripts_rows,
    "schedule_rows": schedule_rows,
    "blog_rows": blog_rows,
    "nurture_rows": nurture_rows,
}
out = brief / "sheet_staging.json"
out.write_text(json.dumps(staging, indent=2))
print(f"Done: {len(scripts_rows)} scripts, {len(schedule_rows)} schedule rows, {len(blog_rows)} blogs, {len(nurture_rows)} nurture/welcome")
