# trellis/audits/dsa-us/audit_recommend.py
# Generates ranked recommendations for the dsa-us account audit.
from datetime import datetime, timedelta, timezone

from trellis.audit import Audit, Finding, Recommendation
from trellis.platforms.google_ads import changelog, metrics
from trellis.stats import welch_t, cusum, bh_correct
# ─── Configuration ───────────────────────────────────────────────────────
ACCOUNT_ID: str = "dsa-us"
LOOKBACK_DAYS: int = 28  # 4-week pre/post window
MIN_SAMPLE_SIZE: int = 120  # clicks per arm before testing
ALPHA: float = 0.05  # target false-discovery rate for BH correction (BH controls FDR, not FWER)
MIN_LIFT_PCT: float = 0.04  # ignore deltas below 4%
# ─── Pipeline ────────────────────────────────────────────────────────────
# NOTE(review): the decorator references a module-level `audit` object that is
# not among the visible imports (only the `Audit` class is) — presumably
# `from trellis import audit`; confirm before running.
@audit.step(name="recommend", depends_on=["investigate"])
def build_recommendations(audit: Audit) -> list[Recommendation]:
    """Rank account-level fixes by expected lift × confidence.

    Pulls the last LOOKBACK_DAYS of bid-strategy/keyword/budget edits, runs
    Welch's t-test on pre/post CPA per edit plus a CUSUM changepoint scan on
    the post-period daily CPA, applies Benjamini–Hochberg correction across
    the family of tests, then keeps edits whose expected lift clears
    MIN_LIFT_PCT and emits the top-3 recommendations.

    Args:
        audit: The loaded Audit for ACCOUNT_ID; the result list is also
            attached to its "recommend" step as a side effect.

    Returns:
        At most three Recommendation objects, best first.
    """
    # Timezone-aware "now": datetime.utcnow() is deprecated and returns a
    # naive datetime, which breaks comparisons with aware timestamps.
    since = datetime.now(timezone.utc) - timedelta(days=LOOKBACK_DAYS)
    edits = changelog.fetch(
        ACCOUNT_ID, since=since, kinds={"BID_STRATEGY", "KEYWORD", "BUDGET"}
    )
    prior, post = metrics.split_by_edits(ACCOUNT_ID, edits, window=LOOKBACK_DAYS)

    # Welch's t per metric × CUSUM for changepoint, then BH multiple-test
    # correction across the whole family of edits.
    deltas = []
    for edit in edits:
        pre = prior[edit.id]
        if pre.clicks < MIN_SAMPLE_SIZE:
            continue  # under-powered, skip
        t, p = welch_t(pre.cpa, post[edit.id].cpa)
        cp = cusum(post[edit.id].daily_cpa, threshold=3.0)
        deltas.append((edit, t, p, cp))

    # d[2] is the raw p-value from Welch's t.
    significant = bh_correct(deltas, alpha=ALPHA, key=lambda d: d[2])

    # BUG FIX: the old code filtered on abs(d[1]) — the t-STATISTIC — against
    # MIN_LIFT_PCT, a 4% lift fraction. That unit mismatch let essentially
    # every significant edit through. The quantity MIN_LIFT_PCT gates is the
    # expected lift, which lives on the built Recommendation, so build first
    # and filter on r.expected_lift.
    recs = [
        Recommendation.from_edit(edit, t_stat=t, p_value=p, cusum=cp)
        for edit, t, p, cp in significant
    ]
    recs = [r for r in recs if abs(r.expected_lift) >= MIN_LIFT_PCT]

    # Best first: highest lift × confidence, ties broken by smaller p-value.
    recs.sort(key=lambda r: (-r.expected_lift * r.confidence, r.p_value))
    recs = recs[:3]  # top-3 only
    audit.attach(step="recommend", payload=recs)
    return recs
if __name__ == "__main__":
    a = Audit.load(ACCOUNT_ID)
    # BUG FIX: the old code printed len(a.recs), but no visible code ever
    # sets an `.recs` attribute on the Audit — build_recommendations attaches
    # a payload and RETURNS the list. Capture the return value instead.
    recs = build_recommendations(a)
    a.publish(channel="slack#dsa-us", format="digest")
    print(f"emitted {len(recs)} recommendations")