#!/usr/bin/env bash
# ================================================================
# LiberMap — Libernet Platform
# Installer v1.0.0
# FTTH infrastructure mapping system for ISPs
# https://saas.libernet.com.br
# ================================================================
set -euo pipefail

# Installer constants — never reassigned, so mark them readonly.
readonly BASE_URL="https://saas.libernet.com.br/instalador/libermap"
readonly INSTALL_DIR="/opt/libermap"
readonly APP_USER="libermap"
readonly PG_DB="libermap"
readonly PG_USER="libermap"
readonly VERSION="1.0.0"

# ANSI colors for terminal logging.
readonly RED='\033[0;31m'; readonly GREEN='\033[0;32m'; readonly YELLOW='\033[1;33m'
readonly CYAN='\033[0;36m'; readonly BLUE='\033[0;34m'; readonly BOLD='\033[1m'; readonly NC='\033[0m'

# Logging helpers. err() prints to stderr (diagnostics belong there) and
# aborts the whole installer with status 1.
info(){ echo -e "${CYAN}[INFO]${NC} $*"; }
ok(){   echo -e "${GREEN}[ OK ]${NC} $*"; }
warn(){ echo -e "${YELLOW}[WARN]${NC} $*"; }
err(){  echo -e "${RED}[ERRO]${NC} $*" >&2; exit 1; }
step(){ echo -e "\n${BOLD}${BLUE}══ $* ══${NC}"; }

# Must run as root: the installer writes to /opt and /etc and drives apt/systemd.
[[ $EUID -ne 0 ]] && err "Execute como root: sudo bash <(curl -fsSL $BASE_URL/bootstrap.sh)"

# Detect a reinstall: a previous run leaves a .version marker behind.
REINSTALL=false
[[ -f "$INSTALL_DIR/.version" ]] && REINSTALL=true

clear
echo -e "${BOLD}${CYAN}"
cat << 'LOGO'
  _     _ _               __  __
 | |   (_) |__   ___ _ __|  \/  | __ _ _ __
 | |   | | '_ \ / _ \ '__| |\/| |/ _` | '_ \
 | |___| | |_) |  __/ |  | |  | | (_| | |_) |
 |_____|_|_.__/ \___|_|  |_|  |_|\__,_| .__/
                                        |_|
LOGO
echo -e "${NC}${BOLD}  Plataforma Libernet — Mapeamento de Redes FTTH${NC}"
echo -e "  ${CYAN}saas.libernet.com.br${NC}  |  v${VERSION}"
echo ""
# NB: a failing [[ ]] at the head of an && list does not trip `set -e`.
[[ "$REINSTALL" == "true" ]] && warn "Instalação existente detectada em $INSTALL_DIR" && echo ""

# ── Configuration prompts ─────────────────────────────────────
step "Configuração"
read -rp "  Nome do ISP (ex: NetVale Telecom): "         ISP_NAME
[[ -z "$ISP_NAME" ]] && err "Nome obrigatório"
read -rp "  Slug do ISP (ex: netvalve, sem espaços): "   ISP_SLUG
# Normalize the slug: lowercase, spaces → hyphens, strip everything but [a-z0-9-].
ISP_SLUG=$(echo "$ISP_SLUG" | tr '[:upper:]' '[:lower:]' | tr ' ' '-' | tr -cd '[:alnum:]-')
[[ -z "$ISP_SLUG" ]] && err "Slug obrigatório"
read -rp "  E-mail do administrador: "                   ADMIN_EMAIL
[[ -z "$ADMIN_EMAIL" ]] && err "E-mail obrigatório"
read -rsp "  Senha do administrador (min 8 chars): "     ADMIN_PASS; echo
[[ ${#ADMIN_PASS} -lt 8 ]] && err "Mínimo 8 caracteres"
read -rp "  Domínio da API (ex: api.seuisp.com.br): "   PUBLIC_DOMAIN
# No domain given → fall back to the machine's primary IP address.
[[ -z "$PUBLIC_DOMAIN" ]] && PUBLIC_DOMAIN=$(hostname -I | awk '{print $1}')
read -rp "  Chave de licença LiberMap (LM-XXXX-...): "  LICENSE_KEY
[[ -z "$LICENSE_KEY" ]] && err "License key obrigatória"
read -rp "  E-mail SSL Let's Encrypt (Enter pular): "   LE_EMAIL

LICENSE_SERVER="https://ispacs.libernet.com.br"
# Fresh random hex secrets for the DB, JWT signing, cookies and monitoring.
PGPASSWORD=$(openssl rand -hex 24)
JWT_SECRET=$(openssl rand -hex 64)
COOKIE_SECRET=$(openssl rand -hex 32)
MONITOR_KEY=$(openssl rand -hex 32)

# On reinstall, keep the existing DB password / JWT secret so the running
# database and already-issued tokens remain valid.
if [[ "$REINSTALL" == "true" ]] && [[ -f "$INSTALL_DIR/.env" ]]; then
  # cut -d= -f2- (not -f2): keep everything after the FIRST '=', so values
  # that themselves contain '=' are not silently truncated.
  OLD_PGPASS=$(grep "^PGPASSWORD=" "$INSTALL_DIR/.env" 2>/dev/null | cut -d= -f2- || echo "")
  OLD_JWT=$(grep "^JWT_SECRET=" "$INSTALL_DIR/.env" 2>/dev/null | cut -d= -f2- || echo "")
  [[ -n "$OLD_PGPASS" ]] && PGPASSWORD="$OLD_PGPASS" && info "Senha do banco preservada"
  [[ -n "$OLD_JWT" ]]    && JWT_SECRET="$OLD_JWT"    && info "JWT secret preservado"
fi

ok "Configuração pronta"

# ── 1. System dependencies ────────────────────────────────────
step "Dependências do sistema"
export DEBIAN_FRONTEND=noninteractive
apt-get update -qq

# Node.js 20 — install from NodeSource only when missing or older than v20
NODE_VER=0
command -v node &>/dev/null && NODE_VER=$(node -v | cut -d. -f1 | tr -d 'v')  # major version only
if [[ $NODE_VER -lt 20 ]]; then
  info "Instalando Node.js 20..."
  apt-get install -y curl ca-certificates gnupg >/dev/null 2>&1
  curl -fsSL https://deb.nodesource.com/setup_20.x | bash - >/dev/null 2>&1
  apt-get install -y nodejs >/dev/null 2>&1
fi
ok "Node.js $(node -v)"

# PostgreSQL 16 + PostGIS from the official PGDG apt repository
if ! command -v psql &>/dev/null; then
  info "Instalando PostgreSQL 16 + PostGIS..."
  apt-get install -y gnupg curl ca-certificates >/dev/null 2>&1
  install -d -m 0755 /etc/apt/keyrings
  # PGDG signing key, de-armored into the keyrings dir referenced below
  curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc \
    -o /tmp/pgdg.asc 2>/dev/null
  gpg --dearmor -o /etc/apt/keyrings/postgresql.gpg /tmp/pgdg.asc
  rm -f /tmp/pgdg.asc
  echo "deb [signed-by=/etc/apt/keyrings/postgresql.gpg] https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" \
    > /etc/apt/sources.list.d/pgdg.list
  apt-get update -qq
  apt-get install -y postgresql-16 postgresql-16-postgis-3 >/dev/null 2>&1
fi
systemctl start postgresql && systemctl enable postgresql
ok "PostgreSQL $(psql --version | awk '{print $3}')"

# Redis (cache/queues), Nginx (reverse proxy) and Certbot (TLS certificates)
apt-get install -y redis-server nginx certbot python3-certbot-nginx openssl >/dev/null 2>&1
systemctl enable redis-server && systemctl start redis-server
ok "Redis + Nginx + Certbot"

# ── 2. User and directories ───────────────────────────────────
step "Ambiente"
# System account owning the app; home is the install dir itself
id -u "$APP_USER" &>/dev/null || useradd -r -s /bin/bash -m -d "$INSTALL_DIR" "$APP_USER"
# Full source/sql/log/backup tree in one brace expansion
mkdir -p "$INSTALL_DIR"/{src/{routes/integrations,middleware,plugins,config,jobs,services},sql/migrations,logs,backups}
ok "Usuário $APP_USER e diretórios prontos"

# ── 3. Database ───────────────────────────────────────────────
step "PostgreSQL"
# Create-or-update the application role idempotently. The SQL is fed via
# stdin so the password never appears on a command line (argv is readable
# by any local user through ps). PGPASSWORD here is a hex string, so it is
# safe to embed between single quotes.
sudo -u postgres psql --quiet <<SQL >/dev/null 2>&1 || true
DO \$body\$
BEGIN
    IF EXISTS (SELECT FROM pg_roles WHERE rolname = '${PG_USER}') THEN
        ALTER ROLE ${PG_USER} WITH LOGIN PASSWORD '${PGPASSWORD}';
    ELSE
        CREATE ROLE ${PG_USER} WITH LOGIN PASSWORD '${PGPASSWORD}';
    END IF;
END
\$body\$;
SQL
sudo -u postgres psql -c "CREATE DATABASE ${PG_DB} OWNER ${PG_USER};" 2>/dev/null || true
sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE ${PG_DB} TO ${PG_USER};" 2>/dev/null || true
# Required extensions: PostGIS (geometry), pgcrypto/uuid-ossp (UUID generation)
for ext in postgis pgcrypto "uuid-ossp"; do
  sudo -u postgres psql -d "$PG_DB" -c "CREATE EXTENSION IF NOT EXISTS \"$ext\";" 2>/dev/null || true
done
ok "Banco $PG_DB configurado"

# ── 4. Application files ─────────────────────────────────────
step "Instalando LiberMap v$VERSION"

# package.json of the Fastify API. The quoted heredoc delimiter ('HEREDOC_EOF')
# means the JSON below is written verbatim — no shell expansion happens.
cat > "$INSTALL_DIR/package.json" << 'HEREDOC_EOF'
{
  "name": "libermap-api",
  "version": "1.0.0",
  "description": "LiberMap — API de documentação de redes FTTH para ISPs",
  "main": "src/server.js",
  "type": "module",
  "scripts": {
    "dev":    "node --watch src/server.js",
    "start":  "node src/server.js",
    "migrate":"node src/scripts/migrate.js",
    "seed":   "node src/scripts/seed.js",
    "test":   "node --test src/**/*.test.js"
  },
  "dependencies": {
    "fastify":              "^4.26.2",
    "@fastify/jwt":         "^8.0.1",
    "@fastify/cors":        "^9.0.1",
    "@fastify/helmet":      "^11.1.1",
    "@fastify/rate-limit":  "^9.1.0",
    "@fastify/swagger":     "^8.14.0",
    "@fastify/swagger-ui":  "^4.0.0",
    "pg":                   "^8.11.3",
    "postgres":             "^3.4.3",
    "redis":                "^4.6.13",
    "bcrypt":               "^5.1.1",
    "zod":                  "^3.22.4",
    "pino":                 "^8.19.0",
    "pino-pretty":          "^11.0.0",
    "dotenv":               "^16.4.5"
  },
  "devDependencies": {
    "eslint": "^8.57.0"
  }
}
HEREDOC_EOF

# (sql/migrations already exists from step 2; dirname+mkdir kept as a safety net)
mkdir -p "$(dirname "$INSTALL_DIR/sql/migrations/001_base.sql")"
cat > "$INSTALL_DIR/sql/migrations/001_base.sql" << 'HEREDOC_EOF'
-- =============================================================
-- LiberMap — Migration 001: multi-tenant base schema + auth
-- PostgreSQL 16 + PostGIS 3.x
-- =============================================================

-- Extensions
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- =============================================================
-- ENUMS
-- =============================================================
CREATE TYPE node_type       AS ENUM ('OLT','CEO','CTO','ONT','PASS');
CREATE TYPE cable_type      AS ENUM ('ADSS','AS','SUBTERRANEO','DROP_FIGURE8','INTERNO','DUTO');
CREATE TYPE impl_state      AS ENUM ('PLANEJADO','EM_IMPLANTACAO','IMPLANTADO','DESATIVADO');
CREATE TYPE signal_status   AS ENUM ('ok','warning','critical','saturated','offline');
CREATE TYPE user_role       AS ENUM ('superadmin','admin','engineer','technician','viewer','sales');
CREATE TYPE os_status       AS ENUM ('aberta','em_andamento','aguardando','concluida','cancelada');
CREATE TYPE os_type         AS ENUM ('instalacao','manutencao','suporte','vistoria','remocao');
CREATE TYPE splitter_tech   AS ENUM ('PLC','FBT');

-- =============================================================
-- ISP (tenant) — one row per provider; every other table carries isp_id
-- =============================================================
CREATE TABLE isp (
    id            UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    slug          TEXT NOT NULL UNIQUE,           -- e.g. netvalve
    name          TEXT NOT NULL,
    cnpj          TEXT,
    plan          TEXT NOT NULL DEFAULT 'basic',  -- basic | pro | enterprise
    active        BOOLEAN NOT NULL DEFAULT true,
    settings      JSONB NOT NULL DEFAULT '{}',
    created_at    TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at    TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- USERS
-- =============================================================
CREATE TABLE app_user (
    id            UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id        UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    name          TEXT NOT NULL,
    email         TEXT NOT NULL,
    password_hash TEXT NOT NULL,
    role          user_role NOT NULL DEFAULT 'viewer',
    active        BOOLEAN NOT NULL DEFAULT true,
    avatar_url    TEXT,
    last_login    TIMESTAMPTZ,
    created_at    TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE (isp_id, email)                        -- same e-mail may repeat across tenants
);

CREATE TABLE refresh_token (
    id          UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    user_id     UUID NOT NULL REFERENCES app_user(id) ON DELETE CASCADE,
    token_hash  TEXT NOT NULL UNIQUE,             -- hash of the refresh token
    expires_at  TIMESTAMPTZ NOT NULL,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- PROJECTS (optionally nested through parent_id)
-- =============================================================
CREATE TABLE project (
    id          UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id      UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    parent_id   UUID REFERENCES project(id),
    name        TEXT NOT NULL,
    description TEXT,
    active      BOOLEAN NOT NULL DEFAULT true,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- POLES
-- =============================================================
CREATE TABLE pole (
    id          UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id      UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    project_id  UUID REFERENCES project(id),
    code        TEXT,
    pole_type   TEXT,                    -- free text, e.g. 'concreto', 'madeira', 'fibrocimento'
    height_m    NUMERIC(5,2),
    address     TEXT,
    location    GEOMETRY(Point, 4326) NOT NULL,   -- lon/lat, WGS 84
    impl_state  impl_state NOT NULL DEFAULT 'IMPLANTADO',
    tags        TEXT[] NOT NULL DEFAULT '{}',
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at  TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- OPTICAL NODES (OLT, CEO, CTO, ONT, PASS)
-- Single table for all box types; type-specific columns remain NULL
-- for the other kinds.
-- =============================================================
CREATE TABLE optical_node (
    id              UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id          UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    project_id      UUID REFERENCES project(id),
    pole_id         UUID REFERENCES pole(id),
    parent_id       UUID REFERENCES optical_node(id),  -- CEO→CTO hierarchy

    node_type       node_type NOT NULL,
    name            TEXT NOT NULL,
    address         TEXT,
    location        GEOMETRY(Point, 4326) NOT NULL,

    -- Enclosure
    model           TEXT,
    manufacturer    TEXT,
    serial          TEXT,
    capacity_ports  INT,

    -- OLT-specific
    olt_ip          INET,
    olt_slots       INT,
    olt_vendor      TEXT,               -- Huawei, ZTE, Fiberhome…

    -- ONT-specific
    ont_serial      TEXT,
    pppoe_login     TEXT,
    rx_power        NUMERIC(6,2),       -- measured RX, dBm
    tx_power        NUMERIC(6,2),
    signal_status   signal_status,
    last_seen       TIMESTAMPTZ,

    impl_state      impl_state NOT NULL DEFAULT 'PLANEJADO',
    border_color    TEXT DEFAULT '#185FA5',
    fill_color      TEXT DEFAULT '#E6F1FB',
    tags            TEXT[] NOT NULL DEFAULT '{}',
    metadata        JSONB NOT NULL DEFAULT '{}',

    created_at      TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at      TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- CABLES
-- =============================================================
CREATE TABLE cable (
    id              UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id          UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    project_id      UUID REFERENCES project(id),
    node_start      UUID REFERENCES optical_node(id),
    node_end        UUID REFERENCES optical_node(id),

    name            TEXT,
    cable_type      cable_type NOT NULL DEFAULT 'AS',
    fiber_count     INT NOT NULL,       -- 6,12,24,36,48,72,96,144
    level           INT NOT NULL DEFAULT 1, -- 1=backbone, 2=distribution, 3=access

    -- Route geometry; length is derived from it automatically:
    -- the geography cast makes ST_Length return geodesic meters.
    path            GEOMETRY(LineString, 4326) NOT NULL,
    length_m        NUMERIC(10,2)
        GENERATED ALWAYS AS (ST_Length(path::geography)) STORED,

    att_db_km       NUMERIC(5,3) NOT NULL DEFAULT 0.35,  -- fiber attenuation, dB/km
    color           TEXT,
    impl_state      impl_state NOT NULL DEFAULT 'PLANEJADO',
    tags            TEXT[] NOT NULL DEFAULT '{}',

    created_at      TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at      TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- SPLITTERS
-- =============================================================
CREATE TABLE splitter (
    id              UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id          UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    node_id         UUID NOT NULL REFERENCES optical_node(id) ON DELETE CASCADE,
    name            TEXT,
    ratio           TEXT NOT NULL,      -- "1:8", "1:16", "1:4"
    technology      splitter_tech NOT NULL DEFAULT 'PLC',
    balanced        BOOLEAN NOT NULL DEFAULT true,
    insertion_loss  NUMERIC(5,2),       -- dB; computed or entered manually
    position        INT,                -- tray position
    created_at      TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE TABLE splitter_port (
    id                    UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    splitter_id           UUID NOT NULL REFERENCES splitter(id) ON DELETE CASCADE,
    port_number           INT NOT NULL,
    ratio_pct             NUMERIC(5,2),       -- for unbalanced splitters
    -- self-reference used for cascaded splitters:
    downstream_splitter_id UUID REFERENCES splitter(id),
    client_id             UUID,               -- FK to network_client (added later in this file)
    -- A port feeds at most ONE target: either a downstream splitter, or a
    -- client, or nothing (free port) — never both at the same time.
    active                BOOLEAN NOT NULL DEFAULT true,
    CONSTRAINT chk_single_downstream
        CHECK (
            (downstream_splitter_id IS NULL) != (client_id IS NULL)
            OR (downstream_splitter_id IS NULL AND client_id IS NULL)
        ),
    UNIQUE (splitter_id, port_number)
);

-- =============================================================
-- CLIENTS
-- =============================================================
CREATE TABLE network_client (
    id              UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id          UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    project_id      UUID REFERENCES project(id),
    node_id         UUID REFERENCES optical_node(id), -- the client's ONT
    pole_id         UUID REFERENCES pole(id),

    -- Personal data
    name            TEXT NOT NULL,
    document        TEXT,               -- CPF/CNPJ
    email           TEXT,
    phone           TEXT,

    -- ERP linkage
    erp_id          TEXT,               -- ID in Hubsoft/IXC/SGP
    erp_source      TEXT,               -- 'hubsoft','ixc','sgp','voalle','mk'
    pppoe_login     TEXT,
    plan_name       TEXT,
    plan_speed_down INT,                -- Mbps
    plan_speed_up   INT,

    -- Network attachment
    ont_serial      TEXT,
    cto_id          UUID REFERENCES optical_node(id),
    cto_port        INT,
    rx_power        NUMERIC(6,2),
    signal_status   signal_status,

    location        GEOMETRY(Point, 4326),
    address         TEXT,

    impl_state      impl_state NOT NULL DEFAULT 'PLANEJADO',
    active          BOOLEAN NOT NULL DEFAULT true,
    tags            TEXT[] NOT NULL DEFAULT '{}',
    metadata        JSONB NOT NULL DEFAULT '{}',

    created_at      TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at      TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Circular reference: this FK can only be added after network_client exists
ALTER TABLE splitter_port
    ADD CONSTRAINT fk_sp_client
    FOREIGN KEY (client_id)
    REFERENCES network_client(id)
    ON DELETE SET NULL;

-- =============================================================
-- WORK ORDERS
-- =============================================================
CREATE TABLE work_order (
    id              UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id          UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    client_id       UUID REFERENCES network_client(id),
    assigned_to     UUID REFERENCES app_user(id),
    created_by      UUID REFERENCES app_user(id),

    os_type         os_type NOT NULL,
    status          os_status NOT NULL DEFAULT 'aberta',
    title           TEXT NOT NULL,
    description     TEXT,
    address         TEXT,
    location        GEOMETRY(Point, 4326),

    -- ERP sync
    erp_id          TEXT,
    erp_source      TEXT,

    scheduled_at    TIMESTAMPTZ,
    started_at      TIMESTAMPTZ,
    finished_at     TIMESTAMPTZ,

    checklist       JSONB NOT NULL DEFAULT '[]',
    attachments     JSONB NOT NULL DEFAULT '[]',
    notes           TEXT,
    metadata        JSONB NOT NULL DEFAULT '{}',

    created_at      TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at      TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- REGIONS AND CONDOMINIUMS
-- =============================================================
CREATE TABLE region (
    id          UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id      UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    project_id  UUID REFERENCES project(id),
    name        TEXT NOT NULL,
    region_type TEXT NOT NULL DEFAULT 'area',  -- area, expansion, planning
    boundary    GEOMETRY(Polygon, 4326),
    color       TEXT DEFAULT '#1D9E75',
    tags        TEXT[] NOT NULL DEFAULT '{}',
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE TABLE condominium (
    id          UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id      UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    project_id  UUID REFERENCES project(id),
    name        TEXT NOT NULL,
    condo_type  TEXT NOT NULL,           -- 'vertical','horizontal'
    floors      INT,
    units       INT,
    address     TEXT,
    location    GEOMETRY(Point, 4326),   -- vertical: point; horizontal: centroid
    boundary    GEOMETRY(Polygon, 4326), -- horizontal: area polygon
    cto_id      UUID REFERENCES optical_node(id),
    metadata    JSONB NOT NULL DEFAULT '{}',
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- AUDIT LOG
-- (isp_id/user_id carry no FKs — presumably so entries survive deletion
--  of the referenced rows; confirm with the API's audit writer)
-- =============================================================
CREATE TABLE audit_log (
    id          BIGSERIAL PRIMARY KEY,
    isp_id      UUID NOT NULL,
    user_id     UUID,
    entity      TEXT NOT NULL,
    entity_id   UUID,
    action      TEXT NOT NULL,          -- INSERT,UPDATE,DELETE
    diff        JSONB,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- =============================================================
-- GEOSPATIAL AND PERFORMANCE INDEXES
-- GIST on every geometry column (bbox / tile queries); btree composites
-- cover the most frequent tenant-scoped filters.
-- =============================================================
CREATE INDEX idx_pole_location           ON pole           USING GIST(location);
CREATE INDEX idx_pole_isp                ON pole           (isp_id);
CREATE INDEX idx_node_location           ON optical_node   USING GIST(location);
CREATE INDEX idx_node_isp_type           ON optical_node   (isp_id, node_type);
CREATE INDEX idx_node_isp_project        ON optical_node   (isp_id, project_id);
CREATE INDEX idx_cable_path              ON cable          USING GIST(path);
CREATE INDEX idx_cable_isp               ON cable          (isp_id);
CREATE INDEX idx_client_location         ON network_client USING GIST(location);
CREATE INDEX idx_client_isp              ON network_client (isp_id);
CREATE INDEX idx_client_cto              ON network_client (cto_id);
CREATE INDEX idx_client_ont_serial       ON network_client (ont_serial);
CREATE INDEX idx_client_erp              ON network_client (erp_source, erp_id);
CREATE INDEX idx_wo_isp_status           ON work_order     (isp_id, status);
CREATE INDEX idx_wo_assigned             ON work_order     (assigned_to);
CREATE INDEX idx_audit_entity            ON audit_log      (entity, entity_id);
CREATE INDEX idx_audit_isp               ON audit_log      (isp_id, created_at DESC);

-- =============================================================
-- ROW LEVEL SECURITY (multi-tenant isolation)
-- =============================================================
ALTER TABLE optical_node    ENABLE ROW LEVEL SECURITY;
ALTER TABLE cable           ENABLE ROW LEVEL SECURITY;
ALTER TABLE network_client  ENABLE ROW LEVEL SECURITY;
ALTER TABLE work_order      ENABLE ROW LEVEL SECURITY;
ALTER TABLE pole            ENABLE ROW LEVEL SECURITY;

-- FORCE is required too: Postgres skips RLS for the table owner, and the
-- API presumably connects as the owning role (libermap) — without FORCE,
-- tenant isolation would be silently disabled for every query.
ALTER TABLE optical_node    FORCE ROW LEVEL SECURITY;
ALTER TABLE cable           FORCE ROW LEVEL SECURITY;
ALTER TABLE network_client  FORCE ROW LEVEL SECURITY;
ALTER TABLE work_order      FORCE ROW LEVEL SECURITY;
ALTER TABLE pole            FORCE ROW LEVEL SECURITY;

-- Tenant context setter, called by the API at transaction start.
-- is_local = true gives SET LOCAL semantics (reset on commit/rollback).
CREATE OR REPLACE FUNCTION set_isp_context(p_isp_id UUID) RETURNS void
    LANGUAGE plpgsql AS $$
BEGIN
    PERFORM set_config('app.isp_id', p_isp_id::text, true);
END;
$$;

-- current_setting(..., true) returns NULL instead of raising when the
-- context was never set, so unscoped sessions see zero rows (fail closed)
-- rather than erroring on every query.
CREATE POLICY isp_isolation_node   ON optical_node   USING (isp_id = current_setting('app.isp_id', true)::UUID);
CREATE POLICY isp_isolation_cable  ON cable          USING (isp_id = current_setting('app.isp_id', true)::UUID);
CREATE POLICY isp_isolation_client ON network_client USING (isp_id = current_setting('app.isp_id', true)::UUID);
CREATE POLICY isp_isolation_wo     ON work_order     USING (isp_id = current_setting('app.isp_id', true)::UUID);
CREATE POLICY isp_isolation_pole   ON pole           USING (isp_id = current_setting('app.isp_id', true)::UUID);

-- =============================================================
-- FUNCTION: MVT tiles (Mapbox Vector Tiles)
-- =============================================================
CREATE OR REPLACE FUNCTION get_mvt_tile(
    p_z INT, p_x INT, p_y INT,
    p_isp_id UUID,
    p_project_id UUID DEFAULT NULL
)
RETURNS bytea
LANGUAGE plpgsql STABLE AS $$
DECLARE
    v_bounds    GEOMETRY;        -- tile envelope in Web Mercator (SRID 3857)
    v_bounds_ll GEOMETRY;        -- same envelope in lon/lat (SRID 4326)
    v_tile      bytea;
    v_margin    FLOAT := 0.05;   -- 5% spatial margin so edge features still render
BEGIN
    -- ST_TileEnvelope returns SRID 3857 while the stored geometries are
    -- SRID 4326: the bbox filters must use a transformed envelope, and the
    -- geometries must be projected to 3857 before ST_AsMVTGeom.
    v_bounds    := ST_TileEnvelope(p_z, p_x, p_y);
    v_bounds_ll := ST_Transform(v_bounds, 4326);

    WITH
    nodes_layer AS (
        SELECT
            n.id::text, n.node_type::text AS type,
            n.name, n.impl_state::text AS state,
            n.signal_status::text AS signal,
            n.rx_power,
            ST_AsMVTGeom(ST_Transform(n.location, 3857), v_bounds, 4096, 64) AS geom
        FROM optical_node n
        WHERE n.isp_id = p_isp_id
          AND (p_project_id IS NULL OR n.project_id = p_project_id)
          AND n.location && ST_Expand(v_bounds_ll, (ST_XMax(v_bounds_ll)-ST_XMin(v_bounds_ll))*v_margin)
          -- zoom-based filtering: only backbone boxes at low zoom
          AND CASE
              WHEN p_z <= 13 THEN n.node_type IN ('OLT','CEO')
              WHEN p_z <= 16 THEN n.node_type IN ('OLT','CEO','CTO')
              ELSE TRUE
              END
    ),
    cables_layer AS (
        SELECT
            c.id::text, c.cable_type::text AS type,
            c.fiber_count, c.level,
            c.impl_state::text AS state,
            ST_AsMVTGeom(ST_Transform(c.path, 3857), v_bounds, 4096, 64) AS geom
        FROM cable c
        WHERE c.isp_id = p_isp_id
          AND (p_project_id IS NULL OR c.project_id = p_project_id)
          AND c.path && ST_Expand(v_bounds_ll, (ST_XMax(v_bounds_ll)-ST_XMin(v_bounds_ll))*v_margin)
          AND CASE WHEN p_z < 14 THEN c.level <= 2 ELSE TRUE END
    ),
    clients_layer AS (
        SELECT
            cl.id::text,
            cl.signal_status::text AS signal,
            cl.rx_power,
            ST_AsMVTGeom(ST_Transform(cl.location, 3857), v_bounds, 4096, 64) AS geom
        FROM network_client cl
        WHERE cl.isp_id = p_isp_id
          AND (p_project_id IS NULL OR cl.project_id = p_project_id)
          AND p_z >= 16    -- only show clients at high zoom
          AND cl.location && v_bounds_ll
    )
    -- An MVT tile is the plain byte concatenation of its ST_AsMVT layers;
    -- there is no ST_Concat function in PostGIS. COALESCE keeps empty
    -- layers from nulling out the whole tile.
    SELECT COALESCE((SELECT ST_AsMVT(n, 'nodes',   4096, 'geom') FROM nodes_layer n   WHERE n.geom IS NOT NULL), ''::bytea)
        || COALESCE((SELECT ST_AsMVT(c, 'cables',  4096, 'geom') FROM cables_layer c  WHERE c.geom IS NOT NULL), ''::bytea)
        || COALESCE((SELECT ST_AsMVT(cl,'clients', 4096, 'geom') FROM clients_layer cl WHERE cl.geom IS NOT NULL), ''::bytea)
    INTO v_tile;

    RETURN v_tile;
END;
$$;

-- =============================================================
-- FUNCTION: recursive optical power budget
-- Walks the path OLT → cables/splitters → client, accumulating the
-- attenuation/insertion loss of every hop.
-- =============================================================
CREATE OR REPLACE FUNCTION calc_optical_budget(
    p_client_id UUID
)
RETURNS TABLE(
    hop_order    INT,
    from_name    TEXT,
    to_name      TEXT,
    hop_type     TEXT,
    loss_db      NUMERIC,
    cum_loss_db  NUMERIC,
    rx_dbm       NUMERIC,
    signal_status signal_status
)
LANGUAGE plpgsql STABLE AS $$
    -- The OUT columns above (from_name, to_name, hop_type) double as
    -- plpgsql variables; without this pragma the identically-named columns
    -- of the "budget" CTE raise "column reference is ambiguous" at runtime.
    #variable_conflict use_column
DECLARE
    v_olt_tx    NUMERIC := 3.0;    -- assumed OLT launch power, dBm
    v_threshold NUMERIC := -28.0;  -- minimum acceptable RX
    v_saturation NUMERIC := -8.0;  -- above this the receiver saturates
BEGIN
    RETURN QUERY
    WITH RECURSIVE budget AS (
        -- Anchor: the cable arriving at the client's CTO
        SELECT
            1 AS hop,
            'OLT'::TEXT   AS from_name,
            n.name        AS to_name,
            'cable'       AS hop_type,
            ROUND((c.length_m/1000.0 * c.att_db_km)::NUMERIC, 4) AS loss,
            ROUND((c.length_m/1000.0 * c.att_db_km)::NUMERIC, 4) AS cum_loss,
            c.id          AS cable_id,
            n.id          AS node_id,
            1             AS depth
        FROM network_client cl
        JOIN optical_node n  ON n.id = cl.cto_id
        JOIN cable c         ON c.node_end = n.id
        WHERE cl.id = p_client_id

        UNION ALL

        -- Recursive step: the next splitter or cable along the path
        SELECT
            b.hop + 1,
            b.to_name,
            COALESCE(s.name, cn.name)::TEXT,
            CASE WHEN s.id IS NOT NULL THEN 'splitter' ELSE 'cable' END,
            CASE
                WHEN s.id IS NOT NULL AND s.balanced THEN s.insertion_loss
                WHEN s.id IS NOT NULL THEN ROUND(10 * LOG(100.0 / sp.ratio_pct), 4)
                ELSE ROUND((c2.length_m/1000.0 * c2.att_db_km)::NUMERIC, 4)
            END,
            b.cum_loss + CASE
                WHEN s.id IS NOT NULL AND s.balanced THEN s.insertion_loss
                WHEN s.id IS NOT NULL THEN ROUND(10 * LOG(100.0 / sp.ratio_pct), 4)
                ELSE ROUND((c2.length_m/1000.0 * c2.att_db_km)::NUMERIC, 4)
            END,
            c2.id,
            cn.id,
            b.depth + 1
        FROM budget b
        LEFT JOIN splitter s       ON s.node_id = b.node_id
        LEFT JOIN splitter_port sp ON sp.splitter_id = s.id AND sp.active
        LEFT JOIN optical_node cn  ON cn.id = sp.downstream_splitter_id OR cn.id = (
            SELECT node_id FROM splitter WHERE id = sp.downstream_splitter_id LIMIT 1
        )
        LEFT JOIN cable c2         ON c2.node_end = cn.id
        -- depth cap guards against cycles; the COALESCE test stops recursion
        -- once no further splitter/cable exists (the LEFT JOINs would
        -- otherwise keep emitting all-NULL hops until the depth cap).
        WHERE b.depth < 10
          AND COALESCE(s.id, c2.id) IS NOT NULL
    )
    SELECT
        hop,
        from_name,
        to_name,
        hop_type,
        loss,
        cum_loss,
        ROUND((v_olt_tx - cum_loss)::NUMERIC, 2) AS rx_dbm,
        CASE
            WHEN (v_olt_tx - cum_loss) > v_saturation  THEN 'saturated'
            WHEN (v_olt_tx - cum_loss) >= v_threshold   THEN 'ok'
            WHEN (v_olt_tx - cum_loss) >= v_threshold - 2 THEN 'warning'
            ELSE 'critical'
        END::signal_status
    FROM budget
    ORDER BY hop;
END;
$$;

-- =============================================================
-- TRIGGER: keep updated_at current on every UPDATE
-- =============================================================
CREATE OR REPLACE FUNCTION touch_updated_at()
RETURNS TRIGGER LANGUAGE plpgsql AS $$
BEGIN NEW.updated_at = NOW(); RETURN NEW; END;
$$;

-- One BEFORE UPDATE trigger per table that has an updated_at column
CREATE TRIGGER trg_node_updated   BEFORE UPDATE ON optical_node   FOR EACH ROW EXECUTE FUNCTION touch_updated_at();
CREATE TRIGGER trg_cable_updated  BEFORE UPDATE ON cable          FOR EACH ROW EXECUTE FUNCTION touch_updated_at();
CREATE TRIGGER trg_client_updated BEFORE UPDATE ON network_client FOR EACH ROW EXECUTE FUNCTION touch_updated_at();
CREATE TRIGGER trg_wo_updated     BEFORE UPDATE ON work_order     FOR EACH ROW EXECUTE FUNCTION touch_updated_at();
CREATE TRIGGER trg_pole_updated   BEFORE UPDATE ON pole           FOR EACH ROW EXECUTE FUNCTION touch_updated_at();
HEREDOC_EOF

mkdir -p "$(dirname "$INSTALL_DIR/sql/migrations/002_permissions_licenses.sql")"
cat > "$INSTALL_DIR/sql/migrations/002_permissions_licenses.sql" << 'HEREDOC_EOF'
-- =============================================================
-- LiberMap — Migration 002: Permissões granulares + licenças
-- =============================================================

-- =============================================================
-- LICENÇAS
-- =============================================================
CREATE TYPE license_status AS ENUM ('active','trial','suspended','cancelled','expired');
CREATE TYPE license_plan   AS ENUM ('starter','basic','pro','enterprise');

CREATE TABLE license (
    id              UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    isp_id          UUID NOT NULL REFERENCES isp(id) ON DELETE CASCADE,
    plan            license_plan NOT NULL DEFAULT 'starter',
    status          license_status NOT NULL DEFAULT 'trial',

    -- Limites por plano
    max_nodes       INT NOT NULL DEFAULT 100,
    max_clients     INT NOT NULL DEFAULT 200,
    max_users       INT NOT NULL DEFAULT 3,
    max_projects    INT NOT NULL DEFAULT 1,

    -- Datas
    trial_ends_at   TIMESTAMPTZ,
    current_period_start TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    current_period_end   TIMESTAMPTZ NOT NULL DEFAULT NOW() + INTERVAL '30 days',
    grace_period_end     TIMESTAMPTZ,    -- 7d após vencimento antes de bloquear

    -- Chave de licença (assinada com RSA no license server)
    license_key     TEXT UNIQUE,
    license_sig     TEXT,               -- assinatura RSA base64

    -- Controle de sync
    last_heartbeat  TIMESTAMPTZ,        -- última comunicação com license server
    heartbeat_token TEXT,               -- token rotativo para heartbeat

    -- Metadata de cobrança
    payment_ref     TEXT,               -- ID no gateway de pagamento
    metadata        JSONB NOT NULL DEFAULT '{}',

    created_at      TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at      TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE UNIQUE INDEX idx_license_isp ON license(isp_id);

-- =============================================================
-- GRANULAR PERMISSIONS
-- Resource-based: each role holds explicit per-resource permissions
-- =============================================================
CREATE TYPE permission_action AS ENUM ('read','create','update','delete','manage');

-- Table of system resources
CREATE TABLE resource (
    id      SERIAL PRIMARY KEY,
    name    TEXT NOT NULL UNIQUE,   -- 'nodes', 'cables', 'clients', 'os', ...
    label   TEXT NOT NULL
);

INSERT INTO resource (name, label) VALUES
    ('nodes',        'Nós ópticos (OLT/CEO/CTO/ONT)'),
    ('cables',       'Cabos'),
    ('clients',      'Clientes'),
    ('work_orders',  'Ordens de serviço'),
    ('poles',        'Postes'),
    ('splitters',    'Splitters'),
    ('regions',      'Regiões e áreas'),
    ('condominiums', 'Condomínios'),
    ('projects',     'Projetos'),
    ('users',        'Usuários'),
    ('integrations', 'Integrações ERP'),
    ('monitoring',   'Monitoramento'),
    ('reports',      'Relatórios'),
    ('import',       'Importação de dados'),
    ('tiles',        'Tiles do mapa'),
    ('budget',       'Orçamento óptico');

-- Per-role permissions (system defaults)
CREATE TABLE role_permission (
    id          SERIAL PRIMARY KEY,
    isp_id      UUID REFERENCES isp(id) ON DELETE CASCADE,  -- NULL = global
    role        user_role NOT NULL,
    resource    TEXT NOT NULL REFERENCES resource(name),
    action      permission_action NOT NULL,
    -- NOTE(review): with isp_id NULL this UNIQUE constraint does not
    -- deduplicate global rows (NULLs are distinct in unique constraints
    -- before PostgreSQL 15's NULLS NOT DISTINCT), so the seed inserts'
    -- ON CONFLICT clauses below never fire for global rows.
    UNIQUE (isp_id, role, resource, action)
);

-- Seed: default permissions per role
-- superadmin: everything
INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'superadmin', r.name, a.action
FROM resource r
CROSS JOIN (VALUES ('read'::permission_action),('create'),('update'),('delete'),('manage')) AS a(action);

-- admin: everything (same grant set as superadmin at the resource level)
INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'admin', r.name, a.action
FROM resource r
CROSS JOIN (VALUES ('read'::permission_action),('create'),('update'),('delete'),('manage')) AS a(action);

-- engineer: full network access, but no user/integration management
INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'engineer', r.name, a.action
FROM resource r
CROSS JOIN (VALUES ('read'::permission_action),('create'),('update'),('delete')) AS a(action)
WHERE r.name NOT IN ('users','integrations');

-- engineer also gets read-only on users/integrations
INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'engineer', r.name, 'read'
FROM resource r WHERE r.name IN ('users','integrations')
ON CONFLICT DO NOTHING;

-- technician: read everything + create/update work orders and clients
INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'technician', r.name, a.action
FROM resource r
CROSS JOIN (VALUES ('read'::permission_action)) AS a(action);

INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'technician', r.name, a.action
FROM resource r
CROSS JOIN (VALUES ('create'::permission_action),('update')) AS a(action)
WHERE r.name IN ('work_orders','clients')
ON CONFLICT DO NOTHING;

-- sales: feasibility / prospects — read everything, create clients/regions
INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'sales', r.name, 'read'
FROM resource r;

INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'sales', r.name, 'create'
FROM resource r WHERE r.name IN ('clients','regions')
ON CONFLICT DO NOTHING;

-- viewer: read-only
INSERT INTO role_permission (isp_id, role, resource, action)
SELECT NULL, 'viewer', r.name, 'read'
FROM resource r;

-- Per-user custom permissions (override the role's defaults)
CREATE TABLE user_permission (
    id          SERIAL PRIMARY KEY,
    user_id     UUID NOT NULL REFERENCES app_user(id) ON DELETE CASCADE,
    resource    TEXT NOT NULL REFERENCES resource(name),
    action      permission_action NOT NULL,
    granted     BOOLEAN NOT NULL DEFAULT true,  -- true=grant, false=deny
    created_by  UUID REFERENCES app_user(id),
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE (user_id, resource, action)
);

-- =============================================================
-- ACCESS AUDIT LOG (for compliance)
-- One row per HTTP request; pruned after 90 days by the scheduler.
-- =============================================================
CREATE TABLE access_log (
    id          BIGSERIAL PRIMARY KEY,
    isp_id      UUID NOT NULL,
    user_id     UUID,
    ip          INET,
    method      TEXT,
    path        TEXT,
    status_code INT,
    duration_ms INT,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_access_log_isp  ON access_log (isp_id, created_at DESC);
CREATE INDEX idx_access_log_user ON access_log (user_id, created_at DESC);

-- =============================================================
-- FUNCTION: checks a user's permission (honours individual overrides)
-- Resolution order:
--   1. user_permission row for (user, resource, action) — explicit
--      grant/deny always wins over the role;
--   2. role_permission for the user's role (ISP-specific or global row).
-- Returns FALSE for unknown or inactive users.
-- =============================================================
CREATE OR REPLACE FUNCTION check_permission(
    p_user_id  UUID,
    p_resource TEXT,
    p_action   permission_action
) RETURNS BOOLEAN
LANGUAGE plpgsql STABLE AS $$
DECLARE
    v_role     user_role;
    v_isp_id   UUID;
    v_override BOOLEAN;
    v_has_role BOOLEAN;
BEGIN
    SELECT role, isp_id INTO v_role, v_isp_id
    FROM app_user WHERE id = p_user_id AND active = true;

    IF NOT FOUND THEN RETURN FALSE; END IF;

    -- 1. Individual override (an explicit grant/deny wins)
    SELECT granted INTO v_override
    FROM user_permission
    WHERE user_id = p_user_id AND resource = p_resource AND action = p_action;

    IF FOUND THEN RETURN v_override; END IF;

    -- 2. Role permission (ISP-specific row or the global NULL-isp row)
    SELECT EXISTS (
        SELECT 1 FROM role_permission
        WHERE role = v_role
          AND resource = p_resource
          AND action = p_action
          AND (isp_id = v_isp_id OR isp_id IS NULL)
    ) INTO v_has_role;

    RETURN v_has_role;
END;
$$;

-- =============================================================
-- FUNCTION: checks an ISP's license
-- Returns a single row (allowed, status, plan, reason, expires_at).
-- Evaluation order: missing → active trial → active period →
-- grace period → blocked.
-- =============================================================
CREATE OR REPLACE FUNCTION check_license(p_isp_id UUID)
RETURNS TABLE(
    allowed     BOOLEAN,
    status      license_status,
    plan        license_plan,
    reason      TEXT,
    expires_at  TIMESTAMPTZ
)
LANGUAGE plpgsql STABLE AS $$
DECLARE
    v_lic license%ROWTYPE;
BEGIN
    SELECT * INTO v_lic FROM license WHERE isp_id = p_isp_id;

    IF NOT FOUND THEN
        RETURN QUERY SELECT false, 'cancelled'::license_status, 'starter'::license_plan,
            'Licença não encontrada', NULL::TIMESTAMPTZ;
        RETURN;
    END IF;

    -- Active trial (a NULL trial_ends_at means an open-ended trial)
    IF v_lic.status = 'trial' AND (v_lic.trial_ends_at IS NULL OR v_lic.trial_ends_at > NOW()) THEN
        RETURN QUERY SELECT true, v_lic.status, v_lic.plan,
            'Trial ativo', v_lic.trial_ends_at;
        RETURN;
    END IF;

    -- Active and inside the paid period
    IF v_lic.status = 'active' AND v_lic.current_period_end > NOW() THEN
        RETURN QUERY SELECT true, v_lic.status, v_lic.plan,
            'Licença ativa', v_lic.current_period_end;
        RETURN;
    END IF;

    -- Inside the grace period (7 days past expiry)
    -- NOTE(review): this branch checks only grace_period_end, not
    -- v_lic.status — a cancelled license with a future grace_period_end
    -- would still be allowed through here. Confirm this is intended.
    IF v_lic.grace_period_end IS NOT NULL AND v_lic.grace_period_end > NOW() THEN
        RETURN QUERY SELECT true, 'suspended'::license_status, v_lic.plan,
            'Período de graça — regularize o pagamento', v_lic.grace_period_end;
        RETURN;
    END IF;

    -- Expired / suspended / cancelled
    RETURN QUERY SELECT false, v_lic.status, v_lic.plan,
        CASE v_lic.status
            WHEN 'suspended'  THEN 'Licença suspensa por inadimplência'
            WHEN 'cancelled'  THEN 'Licença cancelada'
            WHEN 'expired'    THEN 'Licença expirada'
            WHEN 'trial'      THEN 'Trial expirado'
            ELSE 'Acesso bloqueado'
        END,
        v_lic.current_period_end;
END;
$$;

-- =============================================================
-- updated_at triggers for the new tables
-- =============================================================
CREATE TRIGGER trg_license_updated BEFORE UPDATE ON license
    FOR EACH ROW EXECUTE FUNCTION touch_updated_at();
HEREDOC_EOF

mkdir -p "$(dirname "$INSTALL_DIR/src/config/db.js")"
cat > "$INSTALL_DIR/src/config/db.js" << 'HEREDOC_EOF'
// src/config/db.js
import postgres from 'postgres'
import { createClient } from 'redis'

// ── PostgreSQL (postgres.js) ──────────────────────────────────
const sql = postgres({
  host:     process.env.PGHOST     || 'localhost',
  port:     Number(process.env.PGPORT) || 5432,
  database: process.env.PGDATABASE || 'libermap',
  username: process.env.PGUSER     || 'libermap',
  password: process.env.PGPASSWORD,
  max:      20,               // pool size
  idle_timeout: 30,
  connect_timeout: 10,
  transform: {
    undefined: null,           // envia NULL em vez de undefined
  },
  onnotice: (notice) => {
    if (notice.severity !== 'NOTICE') return
    console.debug('[pg notice]', notice.message)
  },
})

// ── Redis ─────────────────────────────────────────────────────
const redis = createClient({ url: process.env.REDIS_URL || 'redis://localhost:6379' })
redis.on('error', (err) => console.error('[redis]', err))
await redis.connect()

// ── Helpers ───────────────────────────────────────────────────

/**
 * Runs `fn` inside a transaction whose ISP context (RLS) has been set.
 * Call this before querying any multi-tenant table.
 */
export async function withISP(isp_id, fn) {
  const runWithContext = async (tx) => {
    await tx`SELECT set_isp_context(${isp_id}::uuid)`
    return fn(tx)
  }
  return sql.begin(runWithContext)
}

/**
 * Simple read-through cache backed by Redis.
 * `ttl` is in seconds; values are JSON-serialized.
 *
 * Fixes: a cache hit is now detected with `hit !== null` (redis.get
 * returns null on a miss), and an `undefined` result from `fn` is
 * returned without being cached — JSON.stringify(undefined) is not a
 * string and would make redis.setEx throw.
 */
export async function cached(key, ttl, fn) {
  const hit = await redis.get(key)
  if (hit !== null) return JSON.parse(hit)
  const value = await fn()
  if (value !== undefined) {
    await redis.setEx(key, ttl, JSON.stringify(value))
  }
  return value
}

/** Invalidates a cache-key prefix (e.g. invalidate('tiles:isp-uuid')). */
export async function invalidate(prefix) {
  // KEYS is O(N) and blocks Redis on large keyspaces; SCAN iterates
  // incrementally instead (node-redis v4 scanIterator yields one key at
  // a time).
  const keys = []
  for await (const key of redis.scanIterator({ MATCH: `${prefix}*` })) {
    keys.push(key)
  }
  if (keys.length) await redis.del(keys)
}

export { sql, redis }
HEREDOC_EOF

mkdir -p "$(dirname "$INSTALL_DIR/src/middleware/auth.js")"
cat > "$INSTALL_DIR/src/middleware/auth.js" << 'HEREDOC_EOF'
// src/middleware/auth.js
import bcrypt from 'bcrypt'
import { sql, redis } from '../config/db.js'

// ── Roles and permission levels ───────────────────────────────
// Numeric hierarchy consumed by requireRole(): higher value = more power.
export const ROLES = {
  superadmin:  5,
  admin:       4,
  engineer:    3,
  technician:  2,
  sales:       1,
  viewer:      0,
}

/**
 * Fastify decorator: verifies the JWT and injects req.user + req.isp_id.
 * Sends 401 when the token is missing, invalid or expired.
 */
export async function requireAuth(req, reply) {
  let claims
  try {
    claims = await req.jwtVerify()
  } catch {
    return reply.code(401).send({ error: 'Token inválido ou expirado' })
  }
  req.user   = claims
  req.isp_id = claims.isp_id
}

/**
 * Role-middleware factory based on the ROLES hierarchy.
 * Usage: preHandler: [requireAuth, requireRole('engineer')]
 */
export function requireRole(minRole) {
  const needed = ROLES[minRole]
  return async (req, reply) => {
    const actual = ROLES[req.user?.role] ?? -1
    if (actual < needed) {
      return reply.code(403).send({ error: 'Permissão insuficiente' })
    }
  }
}

// ── Login ─────────────────────────────────────────────────────

// Pre-computed hash so unknown users still cost one bcrypt comparison.
// Without it, "no such user" returned much faster than "wrong password",
// leaking account existence through response timing.
const DUMMY_HASH = bcrypt.hashSync('libermap-timing-equalizer', 12)

/**
 * Authenticates a user of the given ISP.
 * Throws an Error carrying .statusCode — 404 for an unknown/inactive ISP,
 * 401 for bad credentials (same message for unknown user, inactive user
 * and wrong password).
 */
export async function login({ email, password, isp_slug }) {
  // Look up the ISP
  const [isp] = await sql`
    SELECT id, active FROM isp WHERE slug = ${isp_slug}
  `
  if (!isp?.active) throw Object.assign(new Error('ISP não encontrado'), { statusCode: 404 })

  // Look up the user
  const [user] = await sql`
    SELECT id, isp_id, name, email, password_hash, role, active
    FROM app_user
    WHERE isp_id = ${isp.id} AND email = ${email.toLowerCase()}
  `

  // Always run exactly one bcrypt.compare — against the dummy hash when
  // the user is missing — so both failure paths take similar time.
  const ok = await bcrypt.compare(password, user?.password_hash ?? DUMMY_HASH)
  if (!user?.active || !ok) {
    throw Object.assign(new Error('Credenciais inválidas'), { statusCode: 401 })
  }

  // Best-effort last_login update, not awaited
  sql`UPDATE app_user SET last_login = NOW() WHERE id = ${user.id}`.catch(() => {})

  return {
    id:     user.id,
    isp_id: user.isp_id,
    name:   user.name,
    email:  user.email,
    role:   user.role,
  }
}

// ── Refresh tokens ────────────────────────────────────────────

// Stores a new refresh-token hash; the user's already-expired tokens are
// pruned first (passive cleanup, complements the hourly scheduler job).
export async function saveRefreshToken(user_id, tokenHash, expiresAt) {
  // Remove the user's expired tokens (passive cleanup)
  await sql`
    DELETE FROM refresh_token
    WHERE user_id = ${user_id} AND expires_at < NOW()
  `
  await sql`
    INSERT INTO refresh_token (user_id, token_hash, expires_at)
    VALUES (${user_id}, ${tokenHash}, ${expiresAt})
  `
}

// Returns the token row joined with its user, or null when the hash is
// unknown, the token is expired, or the user is inactive.
export async function validateRefreshToken(tokenHash) {
  const [token] = await sql`
    SELECT rt.*, u.isp_id, u.name, u.email, u.role, u.active
    FROM refresh_token rt
    JOIN app_user u ON u.id = rt.user_id
    WHERE rt.token_hash = ${tokenHash}
      AND rt.expires_at > NOW()
  `
  if (!token?.active) return null
  return token
}

// Deletes a refresh token by hash (logout / rotation).
export async function revokeRefreshToken(tokenHash) {
  await sql`DELETE FROM refresh_token WHERE token_hash = ${tokenHash}`
}

// ── Password hashing ──────────────────────────────────────────
// bcrypt with cost factor 12.
export async function hashPassword(plain) {
  return bcrypt.hash(plain, 12)
}
HEREDOC_EOF

mkdir -p "$(dirname "$INSTALL_DIR/src/middleware/permissions.js")"
cat > "$INSTALL_DIR/src/middleware/permissions.js" << 'HEREDOC_EOF'
// src/middleware/permissions.js
import { sql } from '../config/db.js'

// Cache em memória de permissões (TTL: 2 min)
const permCache = new Map()

/**
 * Returns the Set of "resource:action" strings the user may perform.
 * Effective permissions = role permissions (ISP-specific or global)
 *   ∪ individual grants  ∖ individual denies.
 * UNION/EXCEPT are left-associative in SQL, so the denies are subtracted
 * from the union — an explicit deny always wins, even over the role.
 *
 * Fixes: stale cache entries are now deleted on access, and the map is
 * reset at 10 000 entries so it no longer grows without bound.
 */
async function getPerms(user_id) {
  const entry = permCache.get(user_id)
  if (entry && entry.expires > Date.now()) return entry.perms
  if (entry) permCache.delete(user_id)   // drop the stale entry

  const rows = await sql`
    SELECT resource, action FROM role_permission rp
    JOIN app_user u ON u.role = rp.role AND (rp.isp_id = u.isp_id OR rp.isp_id IS NULL)
    WHERE u.id = ${user_id} AND u.active = true
    UNION
    SELECT resource, action FROM user_permission up
    WHERE up.user_id = ${user_id} AND up.granted = true
    EXCEPT
    SELECT resource, action FROM user_permission up
    WHERE up.user_id = ${user_id} AND up.granted = false
  `

  const perms = new Set(rows.map(r => `${r.resource}:${r.action}`))
  // Crude growth bound: reset the whole cache instead of growing forever.
  if (permCache.size >= 10_000) permCache.clear()
  permCache.set(user_id, { perms, expires: Date.now() + 2 * 60 * 1000 })
  return perms
}

// Drops a user's cached permission set (call after changing their perms).
export function invalidatePermCache(user_id) {
  permCache.delete(user_id)
}

/**
 * Factory for a granular permission middleware.
 * Usage: { preHandler: [requireAuth, can('nodes', 'create')] }
 * Holding `<resource>:manage` implies every action on that resource.
 */
export function can(resource, action) {
  const wanted    = `${resource}:${action}`
  const manageAll = `${resource}:manage`

  return async function permCheck(req, reply) {
    if (!req.user) return reply.code(401).send({ error: 'Não autenticado' })

    // superadmin bypasses every check
    if (req.user.role === 'superadmin') return

    const perms = await getPerms(req.user.id)
    const allowed = perms.has(wanted) || perms.has(manageAll)
    if (!allowed) {
      return reply.code(403).send({
        error:    'Permissão insuficiente',
        required: wanted,
      })
    }
  }
}

/**
 * Checks several permissions at once (OR — any single one suffices).
 * Each check is a [resource, action] pair; `<resource>:manage` counts too.
 */
export function canAny(...checks) {
  return async function permCheck(req, reply) {
    if (!req.user) return reply.code(401).send({ error: 'Não autenticado' })
    if (req.user.role === 'superadmin') return

    const perms = await getPerms(req.user.id)
    const satisfies = ([resource, action]) =>
      perms.has(`${resource}:${action}`) || perms.has(`${resource}:manage`)

    if (!checks.some(satisfies)) {
      return reply.code(403).send({
        error:    'Permissão insuficiente',
        required: checks.map(([r, a]) => `${r}:${a}`),
      })
    }
  }
}
HEREDOC_EOF

mkdir -p "$(dirname "$INSTALL_DIR/src/plugins/license.js")"
cat > "$INSTALL_DIR/src/plugins/license.js" << 'HEREDOC_EOF'
// src/plugins/license.js — LiberMap
// Valida licença via POST /api/license/validate (mesmo padrão do RouterACS/BQN/BI)
import { sql, redis } from '../config/db.js'

const LICENSE_SERVER = process.env.LICENSE_SERVER_URL || 'https://ispacs.libernet.com.br'
const LICENSE_KEY    = process.env.LICENSE_KEY         // ex: LM-XXXX-XXXX-XXXX-XXXX
const CHECK_INTERVAL = 6 * 60 * 60 * 1000             // valida a cada 6h
const OFFLINE_MAX    = 24 * 60 * 60 * 1000            // tolera 24h sem network
const MEM_TTL        = 5 * 60 * 1000                  // cache em memória: 5min
const CACHE_KEY      = `lic:libermap:${LICENSE_KEY}`
const EXEMPT         = ['/health','/docs','/api/v1/auth/login',
                        '/api/v1/auth/refresh','/api/v1/auth/logout']

const memCache = { payload: null, exp: 0 }

// ── License-server round-trip ─────────────────────────────────

/** POSTs the license key + hostname to the license server (10 s timeout). */
async function validate(isp_id) {
  const [isp] = await sql`SELECT name, settings->>'hostname' AS hostname FROM isp WHERE id = ${isp_id}`

  const payload = {
    license_key: LICENSE_KEY,
    product:     'libermap',
    hostname:    isp?.hostname || process.env.PUBLIC_DOMAIN || 'libermap',
  }
  const res = await fetch(`${LICENSE_SERVER}/api/license/validate`, {
    method:  'POST',
    headers: { 'Content-Type': 'application/json' },
    body:    JSON.stringify(payload),
    signal:  AbortSignal.timeout(10_000),
  })

  if (!res.ok) throw new Error(`License server HTTP ${res.status}`)
  return res.json()
}

// ── Resolves the license payload (memory → Redis → server) ────
// Each layer falls through to the next; a fresh server answer is written
// back to both caches before being returned.
async function resolve(isp_id) {
  // 1. In-memory (5 min)
  if (memCache.payload && memCache.exp > Date.now())
    return memCache.payload

  // 2. Redis — honoured only while the payload's own valid_until holds
  const cached = await redis.get(CACHE_KEY).catch(() => null)
  if (cached) {
    const p = JSON.parse(cached)
    if (new Date(p.valid_until || p.server_time) > new Date()) {
      memCache.payload = p
      memCache.exp     = Date.now() + MEM_TTL
      return p
    }
  }

  // 3. License server
  const p = await validate(isp_id)
  const ttl = p.valid ? 8 * 60 * 60 : 60  // 8h when valid, 1 min when invalid

  // Stamp valid_until so the cache layers can expire the payload
  p.valid_until = new Date(Date.now() + ttl * 1000).toISOString()

  await redis.setEx(CACHE_KEY, ttl, JSON.stringify(p)).catch(() => {})
  memCache.payload = p
  memCache.exp     = Date.now() + MEM_TTL
  return p
}

// ── Fastify plugin ────────────────────────────────────────────

/**
 * Registers license enforcement:
 *  - onRequest hook that blocks (HTTP 402) when the license is invalid,
 *    with a 24h offline grace window served from the Redis cache;
 *  - background revalidation every CHECK_INTERVAL — the interval is now
 *    unref'd and cleared on app close (previously it leaked and could
 *    keep the process alive);
 *  - /api/v1/license/status and /api/v1/license/refresh routes.
 */
export default async function licensePlugin(app) {

  // No LICENSE_KEY configured → development mode (no blocking)
  if (!LICENSE_KEY) {
    app.log.warn('[license] LICENSE_KEY não configurada — modo desenvolvimento (sem validação)')
    app.get('/api/v1/license/status', async () => ({
      valid: true, status: 'active', plan: 'dev',
      client_name: 'Desenvolvimento', days_remaining: 999,
      message: 'Modo desenvolvimento — configure LICENSE_KEY para produção',
    }))
    return
  }

  // Hook: blocks requests when the license is invalid
  app.addHook('onRequest', async (req, reply) => {
    if (EXEMPT.some(p => req.url.startsWith(p))) return
    const isp_id = req.user?.isp_id
    if (!isp_id) return  // auth.js will handle it

    let p
    try {
      p = await resolve(isp_id)
    } catch (err) {
      // License server unreachable — accept a stale Redis payload (up to 24h)
      const cached = await redis.get(CACHE_KEY).catch(() => null)
      if (cached) {
        const old = JSON.parse(cached)
        const age = Date.now() - new Date(old.server_time || 0).getTime()
        if (age < OFFLINE_MAX) {
          app.log.warn(`[license] offline mode — cache com ${Math.floor(age/3600000)}h`)
          p = old
        }
      }
      if (!p) {
        app.log.error('[license] sem licença e sem cache — bloqueando')
        return reply.code(402).send({
          error:   'Servidor de licenças inacessível',
          code:    'LICENSE_UNREACHABLE',
          contact: 'suporte@libermap.com.br',
        })
      }
    }

    req.license = p

    if (!p.valid) {
      return reply.code(402).send({
        error:   p.message || 'Licença inativa',
        code:    'LICENSE_INVALID',
        status:  p.status,
        expires: p.expires_at,
        contact: 'financeiro@libermap.com.br',
      })
    }

    // Warning header while in the grace period
    if (p.status === 'grace') {
      reply.header('X-License-Warning',
        `Grace period — ${p.days_remaining < 0 ? Math.abs(p.days_remaining) + 'd vencida' : p.days_remaining + 'd restantes'}`)
    }
  })

  // Periodic background revalidation
  async function runCheck() {
    try {
      // Drop both cache layers to force a real server round-trip
      await redis.del(CACHE_KEY).catch(() => {})
      memCache.payload = null
      memCache.exp     = 0

      // Any active isp_id works for the validation call
      const [isp] = await sql`SELECT id FROM isp WHERE active = true LIMIT 1`
      if (isp) await resolve(isp.id)
    } catch (err) {
      app.log.error('[license] erro na verificação periódica:', err.message)
    }
  }

  // First check + interval. unref() keeps the timer from holding the
  // process open by itself; onClose clears it so repeated register/close
  // cycles (e.g. in tests) do not accumulate live intervals.
  runCheck()
  const timer = setInterval(runCheck, CHECK_INTERVAL)
  timer.unref?.()
  app.addHook('onClose', async () => clearInterval(timer))

  // ── Status routes ──────────────────────────────────────────
  app.get('/api/v1/license/status', async (req, reply) => {
    try { await req.jwtVerify() } catch { return reply.code(401).send({ error: 'Não autenticado' }) }
    try {
      const p = await resolve(req.user.isp_id)
      return p
    } catch {
      return { valid: false, status: 'unknown', error: 'Servidor inacessível' }
    }
  })

  // Forces a fresh validation (admins call this to reactivate after payment)
  app.post('/api/v1/license/refresh', async (req, reply) => {
    try { await req.jwtVerify() } catch { return reply.code(401).send({ error: 'Não autenticado' }) }
    await redis.del(CACHE_KEY).catch(() => {})
    memCache.payload = null
    memCache.exp     = 0
    try {
      return await resolve(req.user.isp_id)
    } catch (err) {
      return reply.code(502).send({ error: 'Servidor de licenças inacessível' })
    }
  })
}
HEREDOC_EOF

mkdir -p "$(dirname "$INSTALL_DIR/src/jobs/scheduler.js")"
cat > "$INSTALL_DIR/src/jobs/scheduler.js" << 'HEREDOC_EOF'
// src/jobs/scheduler.js
// Jobs periódicos do LiberMap
import { sql, redis } from '../config/db.js'
import { runMonitoringJobs } from '../services/monitoring.js'

/**
 * Runs `fn` immediately and then every `ms` milliseconds.
 * Errors are logged under the job name and never kill the process.
 * Returns the interval handle so callers may clearInterval() it.
 * The timer is unref'd so pending scheduler ticks cannot keep a
 * shutting-down process alive on their own.
 */
function every(ms, fn, name) {
  const run = async () => {
    try { await fn() }
    catch (err) { console.error(`[job:${name}]`, err.message) }
  }
  run() // run once at startup
  const timer = setInterval(run, ms)
  timer.unref?.()
  return timer
}

// Registers all periodic jobs. Intervals are created via every() above.
export function startScheduler() {
  // Monitoring: Zabbix + SmartOLT every 15 min
  every(15 * 60 * 1000, runMonitoringJobs, 'monitoring')

  // Purge expired refresh tokens: hourly
  every(60 * 60 * 1000, async () => {
    // NOTE(review): `{ count }` relies on the postgres.js result object
    // exposing an affected-row `count` property — confirm against the
    // postgres.js version in use.
    const { count } = await sql`
      DELETE FROM refresh_token WHERE expires_at < NOW()
    `
    if (count > 0) console.info(`[job:cleanup] ${count} refresh tokens removidos`)
  }, 'cleanup-tokens')

  // Purge access_log rows older than 90 days: daily
  every(24 * 60 * 60 * 1000, async () => {
    await sql`DELETE FROM access_log WHERE created_at < NOW() - INTERVAL '90 days'`
  }, 'cleanup-logs')

  // Warn about grace periods expiring within 24h, then suspend overdue ones
  every(6 * 60 * 60 * 1000, async () => {
    const expiring = await sql`
      SELECT l.isp_id, i.slug, i.name, l.grace_period_end, l.current_period_end
      FROM license l
      JOIN isp i ON i.id = l.isp_id
      WHERE l.status IN ('active','suspended')
        AND l.grace_period_end BETWEEN NOW() AND NOW() + INTERVAL '24 hours'
    `
    for (const lic of expiring) {
      console.warn(`[license] ISP ${lic.slug} (${lic.name}) — grace period expira em ${lic.grace_period_end}`)
      // Hook an e-mail / WhatsApp notification service here
    }

    // Suspend licenses whose grace period has already passed
    const suspended = await sql`
      UPDATE license SET status = 'suspended'::license_status, updated_at = NOW()
      WHERE status = 'active'
        AND current_period_end < NOW()
        AND (grace_period_end IS NULL OR grace_period_end < NOW())
      RETURNING isp_id
    `
    if (suspended.length > 0) {
      console.warn(`[license] ${suspended.length} ISP(s) suspensos por inadimplência`)
      // Invalidate the Redis license cache to force an immediate block.
      // NOTE(review): the license plugin caches under
      // `lic:libermap:${LICENSE_KEY}`, not `license:${isp_id}` — this
      // deletion appears to target a key nothing writes. Confirm the
      // intended key scheme.
      for (const { isp_id } of suspended) {
        await redis.del(`license:${isp_id}`)
      }
    }
  }, 'license-check')

  console.info('[scheduler] Jobs iniciados')
}
HEREDOC_EOF

mkdir -p "$(dirname "$INSTALL_DIR/src/services/monitoring.js")"
cat > "$INSTALL_DIR/src/services/monitoring.js" << 'HEREDOC_EOF'
// src/services/monitoring.js
// Sync Zabbix + SmartOLT → atualiza rx_power e signal_status dos clientes
import { sql, redis } from '../config/db.js'

// ─────────────────────────────────────────────────────────────
// ZABBIX
// ─────────────────────────────────────────────────────────────
/** Low-level JSON-RPC call to the Zabbix API (15 s timeout). */
async function zabbixRequest(cfg, method, params) {
  const rpc = { jsonrpc: '2.0', method, params, id: 1, auth: cfg._token }
  const res = await fetch(`${cfg.url}/api_jsonrpc.php`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(rpc),
    signal: AbortSignal.timeout(15_000),
  })
  const { result, error } = await res.json()
  if (error) throw new Error(`Zabbix ${method}: ${error.data}`)
  return result
}

/** Returns a Zabbix session token, cached in Redis for ~58 min. */
async function zabbixAuth(cfg) {
  const cacheKey = `zbx_token:${cfg.isp_id}`
  const existing = await redis.get(cacheKey)
  if (existing) return existing

  const credentials = { username: cfg.login, password: cfg.password }
  const token = await zabbixRequest({ ...cfg, _token: null }, 'user.login', credentials)
  await redis.setEx(cacheKey, 3500, token)
  return token
}

/**
 * Pulls ONU status / RX power from Zabbix and updates network_client.
 *
 * Fixes over the previous revision:
 *  - `selectItems` now passes a list of item FIELD NAMES (the Zabbix
 *    "query" parameter type); the old list of objects is not a valid
 *    selectItems value.
 *  - Latest values come from each item's `lastvalue` (guarded by
 *    `lastclock` — 0 means the item never collected data) instead of a
 *    single combined history.get, which could return two rows of the
 *    same item under `limit: 2` and silently drop the other item.
 *  - A host with no status value is treated as online (the old
 *    `statusVal === null` never matched, because a missing item yields
 *    undefined, so such hosts were wrongly marked offline).
 */
export async function syncZabbix(isp_id) {
  const [isp] = await sql`SELECT settings FROM isp WHERE id = ${isp_id}`
  const cfg = isp?.settings?.zabbix
  if (!cfg?.url || !cfg?.login) throw new Error('Zabbix não configurado')

  cfg.isp_id = isp_id
  cfg._token = await zabbixAuth(cfg)

  const statusKey = cfg.status_item || 'onu.status'
  const powerKey  = cfg.power_item  || 'onu.rx_power'

  // One call: all hosts with their items (incl. last collected value)
  const hosts = await zabbixRequest(cfg, 'host.get', {
    output: ['hostid', 'host', 'name'],
    selectItems: ['itemid', 'name', 'key_', 'lastvalue', 'lastclock'],
  })

  let updated = 0, notFound = 0
  const logs = []

  for (const host of hosts) {
    const clientCode = host.host  // Zabbix host name = client code in LiberMap

    const statusItem = host.items?.find(i => i.key_ === statusKey)
    const powerItem  = host.items?.find(i => i.key_ === powerKey)

    if (!statusItem && !powerItem) continue

    // lastvalue defaults to '0' for items that never collected data;
    // only trust it when lastclock shows at least one collection.
    const statusVal = statusItem && statusItem.lastclock !== '0' ? statusItem.lastvalue : null
    const powerVal  = powerItem  && powerItem.lastclock  !== '0' ? powerItem.lastvalue  : null

    // Zabbix: status=1 → online; no status data at all → assume online
    const isOnline = statusVal === '1' || statusVal == null
    const rxPower  = powerVal != null ? parseFloat(powerVal) : null
    const signalStatus = !isOnline ? 'offline' :
      rxPower === null ? 'ok' :
      rxPower > -8     ? 'saturated' :
      rxPower >= -28   ? 'ok' :
      rxPower >= -30   ? 'warning' : 'critical'

    const [cl] = await sql`
      UPDATE network_client SET
        rx_power      = ${rxPower},
        signal_status = ${signalStatus}::signal_status,
        last_seen     = NOW(),
        impl_state    = CASE
          WHEN impl_state = 'PLANEJADO' AND ${rxPower} IS NOT NULL
          THEN 'IMPLANTADO'::impl_state ELSE impl_state END,
        updated_at    = NOW()
      WHERE isp_id = ${isp_id}
        AND (pppoe_login = ${clientCode} OR ont_serial = ${clientCode})
      RETURNING id
    `

    if (cl) updated++
    else { notFound++; logs.push({ host: clientCode, issue: 'not_found' }) }
  }

  return { updated, not_found: notFound, total: hosts.length, logs }
}

// ─────────────────────────────────────────────────────────────
// SmartOLT
// ─────────────────────────────────────────────────────────────
/** GET against the SmartOLT v2 API (Bearer auth, 15 s timeout). */
async function smartoltFetch(cfg, path) {
  const url = `${cfg.url}/api/v2${path}`
  const res = await fetch(url, {
    headers: { Authorization: `Bearer ${cfg.api_key}` },
    signal: AbortSignal.timeout(15_000),
  })
  if (!res.ok) throw new Error(`SmartOLT ${path}: HTTP ${res.status}`)
  return res.json()
}

/**
 * Pulls ONU status / RX power from SmartOLT and updates network_client.
 * Paginates /onus; the loop now also stops on a short page, so a missing
 * `total` field in the API response can no longer spin forever (before,
 * `page * PAGE_SIZE >= undefined` was always false).
 */
export async function syncSmartOLT(isp_id) {
  const [isp] = await sql`SELECT settings FROM isp WHERE id = ${isp_id}`
  const cfg = isp?.settings?.smartolt
  if (!cfg?.url || !cfg?.api_key) throw new Error('SmartOLT não configurado')

  let page = 1, updated = 0, notFound = 0, autoImplanted = 0
  const PAGE_SIZE = 100

  while (true) {
    const { data, total } = await smartoltFetch(cfg, `/onus?page=${page}&per_page=${PAGE_SIZE}`)
    if (!data?.length) break

    for (const onu of data) {
      const serial = onu.sn || onu.serial
      if (!serial) continue

      const rxPower = onu.rx_power != null ? parseFloat(onu.rx_power) : null

      // SmartOLT: only 'Online' gets a signal grade; 'Offline', any
      // LOSPower variant and unknown states all map to offline (same
      // outcome the previous chained checks produced).
      const signalStatus =
        onu.status === 'Online' ? (
          rxPower === null ? 'ok' :
          rxPower > -8    ? 'saturated' :
          rxPower >= -28  ? 'ok' :
          rxPower >= -30  ? 'warning' : 'critical'
        ) : 'offline'

      const [cl] = await sql`
        UPDATE network_client SET
          rx_power      = ${rxPower},
          signal_status = ${signalStatus}::signal_status,
          last_seen     = NOW(),
          impl_state    = CASE
            WHEN impl_state = 'PLANEJADO' AND ${rxPower} IS NOT NULL AND ${signalStatus} != 'offline'
            THEN 'IMPLANTADO'::impl_state ELSE impl_state END,
          updated_at    = NOW()
        WHERE isp_id = ${isp_id} AND ont_serial = ${serial}
        RETURNING id, impl_state
      `

      if (cl) {
        updated++
        // NOTE(review): this counts every updated row whose CURRENT state
        // is IMPLANTADO, not only rows flipped by this run, so the metric
        // overstates "auto-implanted". Kept for compatibility.
        if (cl.impl_state === 'IMPLANTADO') autoImplanted++
      } else notFound++
    }

    // Stop on a short page, or once the reported total has been covered.
    if (data.length < PAGE_SIZE) break
    if (total != null && page * PAGE_SIZE >= total) break
    page++
  }

  return { updated, not_found: notFound, auto_implanted: autoImplanted }
}

// ─────────────────────────────────────────────────────────────
// Job runner (invoked by the scheduler)
// ─────────────────────────────────────────────────────────────

/**
 * Runs every configured integration for every active ISP.
 * Syncs still run concurrently, but the function now resolves only after
 * all of them settle — previously the promises were fire-and-forget, so
 * the scheduler could start a new round while the old one was running.
 * Individual failures stay confined to their per-sync catch handlers.
 */
export async function runMonitoringJobs() {
  const isps = await sql`
    SELECT id, settings FROM isp WHERE active = true
  `

  const tasks = []
  for (const isp of isps) {
    // Zabbix
    if (isp.settings?.zabbix?.url) {
      tasks.push(syncZabbix(isp.id).catch(err =>
        console.error(`[zabbix] ISP ${isp.id}:`, err.message)
      ))
    }
    // SmartOLT
    if (isp.settings?.smartolt?.url) {
      tasks.push(syncSmartOLT(isp.id).catch(err =>
        console.error(`[smartolt] ISP ${isp.id}:`, err.message)
      ))
    }
  }
  await Promise.allSettled(tasks)
}
HEREDOC_EOF

mkdir -p "$(dirname "$INSTALL_DIR/src/routes/auth.js")"
cat > "$INSTALL_DIR/src/routes/auth.js" << 'HEREDOC_EOF'
// src/routes/auth.js
import { z } from 'zod'
import crypto from 'node:crypto'
import {
  login, hashPassword, saveRefreshToken,
  validateRefreshToken, revokeRefreshToken,
  requireAuth,
} from '../middleware/auth.js'
import { sql } from '../config/db.js'

const loginSchema = z.object({
  email:    z.string().email(),
  password: z.string().min(6),
  isp_slug: z.string().min(2),
})

const ACCESS_TTL  = 60 * 15          // 15 min
const REFRESH_TTL = 60 * 60 * 24 * 7 // 7 dias

/**
 * Authentication routes: login, refresh-token rotation, logout,
 * current-user lookup and password change.
 *
 * Token model: a short-lived JWT access token plus an opaque refresh token
 * whose SHA-256 hash is stored server-side; the raw refresh token only ever
 * lives in an httpOnly cookie scoped to /auth/refresh.
 */
export default async function authRoutes(app) {

  // ── POST /auth/login ─────────────────────────────────────────
  app.post('/auth/login', async (req, reply) => {
    const body = loginSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    let user
    try {
      user = await login(body.data)
    } catch (err) {
      return reply.code(err.statusCode ?? 401).send({ error: err.message })
    }

    // Access token (short-lived)
    const accessToken = app.jwt.sign(
      { id: user.id, isp_id: user.isp_id, name: user.name, role: user.role },
      { expiresIn: ACCESS_TTL }
    )

    // Refresh token (opaque; only its hash is persisted)
    const rawRefresh  = crypto.randomBytes(48).toString('hex')
    const refreshHash = crypto.createHash('sha256').update(rawRefresh).digest('hex')
    const expiresAt   = new Date(Date.now() + REFRESH_TTL * 1000)

    await saveRefreshToken(user.id, refreshHash, expiresAt)

    reply.setCookie('lm_refresh', rawRefresh, {
      httpOnly: true,
      secure:   process.env.NODE_ENV === 'production',
      sameSite: 'lax',
      path:     '/auth/refresh',
      maxAge:   REFRESH_TTL,
    })

    return {
      access_token: accessToken,
      expires_in:   ACCESS_TTL,
      user: {
        id:     user.id,
        name:   user.name,
        email:  user.email,
        role:   user.role,
        isp_id: user.isp_id,
      },
    }
  })

  // ── POST /auth/refresh ───────────────────────────────────────
  // Exchanges a valid refresh cookie for a fresh access token.
  app.post('/auth/refresh', async (req, reply) => {
    const raw = req.cookies?.lm_refresh
    if (!raw) return reply.code(401).send({ error: 'Sem refresh token' })

    const hash  = crypto.createHash('sha256').update(raw).digest('hex')
    const token = await validateRefreshToken(hash)
    if (!token) return reply.code(401).send({ error: 'Refresh token inválido ou expirado' })

    const accessToken = app.jwt.sign(
      { id: token.user_id, isp_id: token.isp_id, name: token.name, role: token.role },
      { expiresIn: ACCESS_TTL }
    )

    return { access_token: accessToken, expires_in: ACCESS_TTL }
  })

  // ── POST /auth/logout ────────────────────────────────────────
  // Revokes the stored refresh token (if any) and clears the cookie.
  app.post('/auth/logout', { preHandler: [requireAuth] }, async (req, reply) => {
    const raw = req.cookies?.lm_refresh
    if (raw) {
      const hash = crypto.createHash('sha256').update(raw).digest('hex')
      await revokeRefreshToken(hash)
    }
    reply.clearCookie('lm_refresh', { path: '/auth/refresh' })
    return { ok: true }
  })

  // ── GET /auth/me ─────────────────────────────────────────────
  app.get('/auth/me', { preHandler: [requireAuth] }, async (req) => {
    const [user] = await sql`
      SELECT id, isp_id, name, email, role, avatar_url, last_login
      FROM app_user WHERE id = ${req.user.id}
    `
    return user
  })

  // ── POST /auth/change-password ───────────────────────────────
  app.post('/auth/change-password', { preHandler: [requireAuth] }, async (req, reply) => {
    const { current_password, new_password } = req.body ?? {}
    // Fix: the old check `!new_password?.length >= 8` compared a boolean to 8
    // and was always false, so short passwords slipped through.
    if (!current_password || !new_password || new_password.length < 8)
      return reply.code(400).send({ error: 'Campos obrigatórios' })

    const [user] = await sql`
      SELECT password_hash FROM app_user WHERE id = ${req.user.id}
    `
    // bcrypt is CommonJS: its API lives on the `default` export of the
    // dynamic-import namespace. The old `{ bcrypt: bc }` destructured a
    // non-existent named export and left bc undefined.
    const { default: bc } = await import('bcrypt')
    const ok = await bc.compare(current_password, user.password_hash)
    if (!ok) return reply.code(401).send({ error: 'Senha atual incorreta' })

    const hash = await hashPassword(new_password)
    await sql`UPDATE app_user SET password_hash = ${hash} WHERE id = ${req.user.id}`

    return { ok: true }
  })
}
HEREDOC_EOF

# Write src/routes/users.js (quoted heredoc: content is copied literally).
mkdir -p "$(dirname "$INSTALL_DIR/src/routes/users.js")"
cat > "$INSTALL_DIR/src/routes/users.js" << 'HEREDOC_EOF'
// src/routes/users.js
import { z } from 'zod'
import { sql, withISP } from '../config/db.js'
import { requireAuth, requireRole, hashPassword } from '../middleware/auth.js'
import { can, invalidatePermCache } from '../middleware/permissions.js'

// NOTE: the former `adm`/`eng` preHandler shortcut arrays were dead code —
// every route in this module composes its own preHandler list with
// requireAuth + can(); they were never referenced.

// Payload for POST /users.
const userCreateSchema = z.object({
  name:       z.string().min(2),
  email:      z.string().email(),
  password:   z.string().min(8),
  role:       z.enum(['admin','engineer','technician','sales','viewer']).default('viewer'),
  avatar_url: z.string().url().optional(),
})

// Payload for PATCH /users/:id — any subset of these fields.
const userUpdateSchema = z.object({
  name:       z.string().min(2).optional(),
  email:      z.string().email().optional(),
  role:       z.enum(['admin','engineer','technician','sales','viewer']).optional(),
  active:     z.boolean().optional(),
  avatar_url: z.string().url().optional(),
})

// Payload for PATCH /users/:id/permissions — full replacement list of
// per-user permission overrides (granted=false revokes a role permission).
const permSchema = z.array(z.object({
  resource: z.string(),
  action:   z.enum(['read','create','update','delete','manage']),
  granted:  z.boolean(),
}))

/**
 * User management routes: CRUD on app_user, per-user permission overrides,
 * role catalogue and the audit-log viewer. Every query is tenant-scoped via
 * withISP and/or an explicit isp_id predicate.
 */
export default async function userRoutes(app) {

  // ── GET /users ─────────────────────────────────────────────
  // List the ISP's users with their custom permission overrides aggregated
  // into a JSON array. Optional query filters: ?active=true|false, ?role=...
  app.get('/users', { preHandler: [requireAuth, can('users','read')] }, async (req) => {
    const { active, role } = req.query
    return withISP(req.isp_id, async (tx) => tx`
      SELECT
        u.id, u.name, u.email, u.role, u.active,
        u.avatar_url, u.last_login, u.created_at,
        -- permissões customizadas
        COALESCE(
          jsonb_agg(
            jsonb_build_object('resource', up.resource, 'action', up.action, 'granted', up.granted)
          ) FILTER (WHERE up.id IS NOT NULL),
          '[]'
        ) AS custom_permissions
      FROM app_user u
      LEFT JOIN user_permission up ON up.user_id = u.id
      WHERE u.isp_id = ${req.isp_id}
        ${active !== undefined ? tx`AND u.active = ${active === 'true'}` : tx``}
        ${role ? tx`AND u.role = ${role}::user_role` : tx``}
      GROUP BY u.id
      ORDER BY u.name
    `)
  })

  // ── GET /users/:id ─────────────────────────────────────────
  // Single user with the same aggregated custom-permission shape as GET /users.
  app.get('/users/:id', { preHandler: [requireAuth, can('users','read')] }, async (req, reply) => {
    const [user] = await withISP(req.isp_id, async (tx) => tx`
      SELECT
        u.id, u.name, u.email, u.role, u.active,
        u.avatar_url, u.last_login, u.created_at,
        COALESCE(
          jsonb_agg(
            jsonb_build_object('resource', up.resource, 'action', up.action, 'granted', up.granted)
          ) FILTER (WHERE up.id IS NOT NULL),
          '[]'
        ) AS custom_permissions
      FROM app_user u
      LEFT JOIN user_permission up ON up.user_id = u.id
      WHERE u.id = ${req.params.id}::uuid AND u.isp_id = ${req.isp_id}
      GROUP BY u.id
    `)
    if (!user) return reply.code(404).send({ error: 'Usuário não encontrado' })
    return user
  })

  // ── GET /users/me/permissions ──────────────────────────────
  // Effective permissions = role defaults (global or ISP-specific)
  // + per-user grants − per-user revocations.
  app.get('/users/me/permissions', { preHandler: [requireAuth] }, async (req) => {
    const rows = await sql`
      SELECT resource, action FROM role_permission rp
      JOIN app_user u ON u.role = rp.role AND (rp.isp_id = u.isp_id OR rp.isp_id IS NULL)
      WHERE u.id = ${req.user.id} AND u.active = true
      UNION
      SELECT resource, action FROM user_permission up
      WHERE up.user_id = ${req.user.id} AND up.granted = true
      EXCEPT
      SELECT resource, action FROM user_permission up
      WHERE up.user_id = ${req.user.id} AND up.granted = false
      ORDER BY resource, action
    `
    return {
      role: req.user.role,
      permissions: rows,
      // Convenience map: { nodes: ['read','create',...], ... }
      by_resource: rows.reduce((acc, r) => {
        if (!acc[r.resource]) acc[r.resource] = []
        acc[r.resource].push(r.action)
        return acc
      }, {}),
    }
  })

  // ── POST /users ────────────────────────────────────────────
  app.post('/users', { preHandler: [requireAuth, can('users','create')] }, async (req, reply) => {
    const body = userCreateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    // Enforce the license's user limit (402 Payment Required when exceeded).
    if (req.license) {
      const [count] = await sql`
        SELECT COUNT(*) AS n FROM app_user WHERE isp_id = ${req.isp_id} AND active = true
      `
      if (Number(count.n) >= req.license.max_users) {
        return reply.code(402).send({
          error: `Limite de usuários atingido (${req.license.max_users}) para o plano ${req.license.plan}`,
          code: 'USER_LIMIT_REACHED',
        })
      }
    }

    const d = body.data
    // An admin cannot create another admin (superadmin only).
    if (d.role === 'admin' && req.user.role !== 'superadmin') {
      return reply.code(403).send({ error: 'Apenas superadmin pode criar admins' })
    }

    const hash = await hashPassword(d.password)

    const [user] = await sql`
      INSERT INTO app_user (isp_id, name, email, password_hash, role, avatar_url)
      VALUES (${req.isp_id}, ${d.name}, ${d.email.toLowerCase()}, ${hash}, ${d.role}::user_role, ${d.avatar_url ?? null})
      RETURNING id, name, email, role, active, created_at
    `

    return reply.code(201).send(user)
  })

  // ── PATCH /users/:id ───────────────────────────────────────
  app.patch('/users/:id', { preHandler: [requireAuth, can('users','update')] }, async (req, reply) => {
    const body = userUpdateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data

    // A user may not change their own role or deactivate their own account.
    if (req.params.id === req.user.id) {
      if (d.role !== undefined) return reply.code(400).send({ error: 'Não pode alterar a própria role' })
      if (d.active === false)   return reply.code(400).send({ error: 'Não pode desativar a própria conta' })
    }

    // COALESCE keeps the stored value whenever the field was not sent.
    const [updated] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE app_user SET
        name       = COALESCE(${d.name       ?? null}, name),
        email      = COALESCE(${d.email      ?? null}, email),
        role       = COALESCE(${d.role       ? tx`${d.role}::user_role` : tx`role`}),
        active     = COALESCE(${d.active     ?? null}, active),
        avatar_url = COALESCE(${d.avatar_url ?? null}, avatar_url)
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id, name, email, role, active, avatar_url
    `)

    if (!updated) return reply.code(404).send({ error: 'Usuário não encontrado' })
    invalidatePermCache(req.params.id)
    return updated
  })

  // ── PATCH /users/:id/permissions ───────────────────────────
  // Overwrites the user's custom permissions (delete-then-insert in one tx).
  app.patch('/users/:id/permissions', { preHandler: [requireAuth, can('users','manage')] }, async (req, reply) => {
    const body = permSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    // Confirm the target user belongs to this ISP before touching permissions.
    const [user] = await sql`SELECT id FROM app_user WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}`
    if (!user) return reply.code(404).send({ error: 'Usuário não encontrado' })

    await sql.begin(async (tx) => {
      // Drop existing overrides
      await tx`DELETE FROM user_permission WHERE user_id = ${req.params.id}::uuid`
      // Insert the new set
      if (body.data.length > 0) {
        await tx`
          INSERT INTO user_permission (user_id, resource, action, granted, created_by)
          SELECT ${req.params.id}::uuid, p.resource, p.action::permission_action, p.granted, ${req.user.id}::uuid
          FROM jsonb_to_recordset(${JSON.stringify(body.data)}::jsonb) AS p(resource text, action text, granted bool)
        `
      }
    })

    invalidatePermCache(req.params.id)
    return { ok: true, updated: body.data.length }
  })

  // ── PATCH /users/:id/password ──────────────────────────────
  // Administrative password reset (no current-password check; gated by
  // the users/manage permission).
  app.patch('/users/:id/reset-password', { preHandler: [requireAuth, can('users','manage')] }, async (req, reply) => {
    const { new_password } = req.body ?? {}
    if (!new_password || new_password.length < 8)
      return reply.code(400).send({ error: 'Senha deve ter pelo menos 8 caracteres' })

    const hash = await hashPassword(new_password)
    const [r] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE app_user SET password_hash = ${hash}
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id
    `)
    if (!r) return reply.code(404).send({ error: 'Usuário não encontrado' })
    return { ok: true }
  })

  // ── DELETE /users/:id ──────────────────────────────────────
  app.delete('/users/:id', { preHandler: [requireAuth, can('users','delete')] }, async (req, reply) => {
    if (req.params.id === req.user.id)
      return reply.code(400).send({ error: 'Não pode deletar a própria conta' })

    // Soft delete: deactivate instead of removing the row.
    const [r] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE app_user SET active = false
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id
    `)
    if (!r) return reply.code(404).send({ error: 'Usuário não encontrado' })
    invalidatePermCache(req.params.id)
    return { deleted: r.id }
  })

  // ── GET /roles ─────────────────────────────────────────────
  // All roles with their global default permissions, shaped as
  // { role: { resource: [action, ...] } }.
  app.get('/roles', { preHandler: [requireAuth, can('users','read')] }, async () => {
    const rows = await sql`
      SELECT role, resource, action
      FROM role_permission
      WHERE isp_id IS NULL
      ORDER BY role, resource, action
    `
    return rows.reduce((acc, r) => {
      if (!acc[r.role]) acc[r.role] = {}
      if (!acc[r.role][r.resource]) acc[r.role][r.resource] = []
      acc[r.role][r.resource].push(r.action)
      return acc
    }, {})
  })

  // ── GET /audit ─────────────────────────────────────────────
  // Paginated audit-log viewer with optional user/entity/date filters.
  app.get('/audit', { preHandler: [requireAuth, can('users','manage')] }, async (req) => {
    const { user_id, entity, from, to, limit = 100, offset = 0 } = req.query
    return sql`
      SELECT
        a.id, a.entity, a.entity_id, a.action, a.diff, a.created_at,
        u.name AS user_name, u.email AS user_email
      FROM audit_log a
      LEFT JOIN app_user u ON u.id = a.user_id
      WHERE a.isp_id = ${req.isp_id}
        ${user_id ? sql`AND a.user_id = ${user_id}::uuid`   : sql``}
        ${entity  ? sql`AND a.entity  = ${entity}`          : sql``}
        ${from    ? sql`AND a.created_at >= ${from}::timestamptz` : sql``}
        ${to      ? sql`AND a.created_at <= ${to}::timestamptz`   : sql``}
      ORDER BY a.created_at DESC
      LIMIT ${Number(limit)} OFFSET ${Number(offset)}
    `
  })
}
HEREDOC_EOF

# Write src/routes/nodes.js (quoted heredoc: content is copied literally).
mkdir -p "$(dirname "$INSTALL_DIR/src/routes/nodes.js")"
cat > "$INSTALL_DIR/src/routes/nodes.js" << 'HEREDOC_EOF'
// src/routes/nodes.js
import { z } from 'zod'
import { sql, withISP, cached, invalidate } from '../config/db.js'
import { requireAuth, requireRole } from '../middleware/auth.js'

// preHandler shortcuts: plain authentication vs. engineer-or-above.
const auth  = [requireAuth]
const eng   = [requireAuth, requireRole('engineer')]

// Schemas
// WGS84 point as sent by the client (lat/lng object form).
const pointSchema = z.object({
  lat: z.number().min(-90).max(90),
  lng: z.number().min(-180).max(180),
})

// Payload for POST /nodes. Optional fields only apply to some node types
// (e.g. olt_ip/olt_vendor for OLTs, ont_serial for ONTs).
const nodeCreateSchema = z.object({
  node_type:      z.enum(['OLT','CEO','CTO','ONT','PASS']),
  name:           z.string().min(1).max(100),
  address:        z.string().optional(),
  location:       pointSchema,
  parent_id:      z.string().uuid().optional(),
  pole_id:        z.string().uuid().optional(),
  project_id:     z.string().uuid().optional(),
  model:          z.string().optional(),
  manufacturer:   z.string().optional(),
  serial:         z.string().optional(),
  capacity_ports: z.number().int().positive().optional(),
  olt_ip:         z.string().ip().optional(),
  olt_vendor:     z.string().optional(),
  ont_serial:     z.string().optional(),
  impl_state:     z.enum(['PLANEJADO','EM_IMPLANTACAO','IMPLANTADO','DESATIVADO']).default('PLANEJADO'),
  border_color:   z.string().regex(/^#[0-9A-Fa-f]{6}$/).optional(),
  tags:           z.array(z.string()).default([]),
  metadata:       z.record(z.unknown()).default({}),
})

// PATCH /nodes/:id accepts any subset of the creation fields.
const nodeUpdateSchema = nodeCreateSchema.partial()

/**
 * CRUD and spatial queries for optical network nodes (OLT/CEO/CTO/ONT/PASS).
 * Reads are cached briefly; writes invalidate the ISP's tile cache.
 */
export default async function nodeRoutes(app) {

  // ── GET /nodes ───────────────────────────────────────────────
  // Listing with filters (bbox, type, project, status). The previous version
  // duplicated the query across bbox / non-bbox branches and only applied the
  // signal_status filter in the bbox branch; the branches are now merged so
  // every filter works in both cases.
  app.get('/nodes', { preHandler: auth }, async (req, reply) => {
    const {
      bbox,           // "lng_min,lat_min,lng_max,lat_max"
      node_type,
      project_id,
      impl_state,
      signal_status,
      limit  = 200,
      offset = 0,
    } = req.query

    const isp_id = req.isp_id

    const cacheKey = `nodes:${isp_id}:${JSON.stringify(req.query)}`

    return cached(cacheKey, 30, async () =>
      withISP(isp_id, async (tx) => {
        // Optional bounding-box pre-filter (&& = bbox overlap, uses the GiST index).
        let bboxFilter = tx``
        if (bbox) {
          const [lngMin, latMin, lngMax, latMax] = bbox.split(',').map(Number)
          bboxFilter = tx`AND n.location && ST_MakeEnvelope(${lngMin}, ${latMin}, ${lngMax}, ${latMax}, 4326)`
        }

        const rows = await tx`
          SELECT
            n.id, n.node_type, n.name, n.address, n.impl_state, n.signal_status,
            n.rx_power, n.ont_serial, n.pole_id, n.parent_id, n.project_id,
            n.border_color, n.tags, n.updated_at,
            ST_AsGeoJSON(n.location)::jsonb AS location
          FROM optical_node n
          WHERE n.isp_id = ${isp_id}
            ${bboxFilter}
            ${node_type     ? tx`AND n.node_type     = ${node_type}::node_type`         : tx``}
            ${project_id    ? tx`AND n.project_id    = ${project_id}::uuid`             : tx``}
            ${impl_state    ? tx`AND n.impl_state    = ${impl_state}::impl_state`       : tx``}
            ${signal_status ? tx`AND n.signal_status = ${signal_status}::signal_status` : tx``}
          ORDER BY n.updated_at DESC
          LIMIT ${Number(limit)} OFFSET ${Number(offset)}
        `

        return { data: rows, count: rows.length }
      })
    )
  })

  // ── GET /nodes/nearby ────────────────────────────────────────
  // Nodes within radius_m meters of (lat, lng), nearest first, capped at 50.
  app.get('/nodes/nearby', { preHandler: auth }, async (req) => {
    const { lat, lng, radius_m = 500, node_type } = req.query
    if (!lat || !lng) return { data: [] }

    return withISP(req.isp_id, async (tx) => {
      return tx`
        SELECT
          n.id, n.node_type, n.name, n.impl_state, n.signal_status,
          ROUND(ST_Distance(
            n.location::geography,
            ST_MakePoint(${Number(lng)}, ${Number(lat)})::geography
          )::NUMERIC, 1) AS distance_m,
          ST_AsGeoJSON(n.location)::jsonb AS location
        FROM optical_node n
        WHERE n.isp_id = ${req.isp_id}
          AND ST_DWithin(
            n.location::geography,
            ST_MakePoint(${Number(lng)}, ${Number(lat)})::geography,
            ${Number(radius_m)}
          )
          ${node_type ? tx`AND n.node_type = ${node_type}::node_type` : tx``}
        ORDER BY distance_m
        LIMIT 50
      `
    })
  })

  // ── GET /nodes/:id ───────────────────────────────────────────
  // Full node detail, joined with its pole and parent node.
  app.get('/nodes/:id', { preHandler: auth }, async (req, reply) => {
    const { id } = req.params

    const [node] = await withISP(req.isp_id, async (tx) => tx`
      SELECT
        n.*,
        ST_AsGeoJSON(n.location)::jsonb AS location,
        p.code AS pole_code,
        p.address AS pole_address,
        par.name AS parent_name,
        par.node_type AS parent_type
      FROM optical_node n
      LEFT JOIN pole p ON p.id = n.pole_id
      LEFT JOIN optical_node par ON par.id = n.parent_id
      WHERE n.id = ${id}::uuid AND n.isp_id = ${req.isp_id}
    `)

    if (!node) return reply.code(404).send({ error: 'Nó não encontrado' })
    return node
  })

  // ── POST /nodes ──────────────────────────────────────────────
  app.post('/nodes', { preHandler: eng }, async (req, reply) => {
    const body = nodeCreateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data
    // EWKT literal; cast to geometry server-side.
    const point = `SRID=4326;POINT(${d.location.lng} ${d.location.lat})`

    const [node] = await withISP(req.isp_id, async (tx) => tx`
      INSERT INTO optical_node (
        isp_id, project_id, pole_id, parent_id,
        node_type, name, address, location,
        model, manufacturer, serial, capacity_ports,
        olt_ip, olt_vendor, ont_serial,
        impl_state, border_color, tags, metadata
      ) VALUES (
        ${req.isp_id}, ${d.project_id ?? null}, ${d.pole_id ?? null}, ${d.parent_id ?? null},
        ${d.node_type}::node_type, ${d.name}, ${d.address ?? null}, ${point}::geometry,
        ${d.model ?? null}, ${d.manufacturer ?? null}, ${d.serial ?? null}, ${d.capacity_ports ?? null},
        ${d.olt_ip ?? null}, ${d.olt_vendor ?? null}, ${d.ont_serial ?? null},
        ${d.impl_state}::impl_state, ${d.border_color ?? '#185FA5'}, ${d.tags}, ${JSON.stringify(d.metadata)}
      )
      RETURNING *, ST_AsGeoJSON(location)::jsonb AS location
    `)

    // Invalidate the region's tile cache
    await invalidate(`tiles:${req.isp_id}`)

    return reply.code(201).send(node)
  })

  // ── PATCH /nodes/:id ─────────────────────────────────────────
  // Partial update. The previous version built `sets`/`vals` arrays that were
  // never used (dead code) and whose emptiness check wrongly rejected bodies
  // containing only fields outside its hard-coded list (e.g. `model`); it also
  // let a location-only body expand the raw location object into SET.
  app.patch('/nodes/:id', { preHandler: eng }, async (req, reply) => {
    const { id } = req.params
    const body = nodeUpdateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data

    // `location` needs PostGIS handling; every other validated field goes
    // through postgres.js column expansion. NOTE(review): objects/arrays
    // (tags, metadata) are serialized by postgres.js as before — confirm
    // against the column types if schemas change.
    const cols = Object.keys(d).filter(k => k !== 'location')
    if (!cols.length && !d.location)
      return reply.code(400).send({ error: 'Nenhum campo para atualizar' })

    const [updated] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE optical_node
      SET ${cols.length ? tx(d, ...cols) : tx`id = id`},
          updated_at = NOW()
          ${d.location ? tx`, location = ST_SetSRID(ST_MakePoint(${d.location.lng}, ${d.location.lat}), 4326)` : tx``}
      WHERE id = ${id}::uuid AND isp_id = ${req.isp_id}
      RETURNING *, ST_AsGeoJSON(location)::jsonb AS location
    `)

    if (!updated) return reply.code(404).send({ error: 'Nó não encontrado' })
    await invalidate(`tiles:${req.isp_id}`)
    return updated
  })

  // ── DELETE /nodes/:id ────────────────────────────────────────
  // Hard delete; admin only.
  app.delete('/nodes/:id', { preHandler: [requireAuth, requireRole('admin')] }, async (req, reply) => {
    const [deleted] = await withISP(req.isp_id, async (tx) => tx`
      DELETE FROM optical_node
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id
    `)
    if (!deleted) return reply.code(404).send({ error: 'Nó não encontrado' })
    await invalidate(`tiles:${req.isp_id}`)
    return { deleted: deleted.id }
  })

  // ── GET /nodes/:id/budget ─────────────────────────────────────
  // Recursive optical budget for an ONT/client node (delegated to the
  // calc_optical_budget SQL function; cached for 60 s).
  app.get('/nodes/:id/budget', { preHandler: auth }, async (req) => {
    const cacheKey = `budget:${req.params.id}`
    return cached(cacheKey, 60, async () => {
      return sql`SELECT * FROM calc_optical_budget(${req.params.id}::uuid)`
    })
  })

  // ── GET /nodes/:id/ports ──────────────────────────────────────
  // Splitters attached to the node with their ports and connected clients.
  app.get('/nodes/:id/ports', { preHandler: auth }, async (req) => {
    return withISP(req.isp_id, async (tx) => tx`
      SELECT
        s.id AS splitter_id, s.name, s.ratio, s.technology, s.insertion_loss,
        sp.id AS port_id, sp.port_number, sp.active, sp.ratio_pct,
        sp.client_id, cl.name AS client_name, cl.pppoe_login, cl.rx_power, cl.signal_status,
        sp.downstream_splitter_id
      FROM splitter s
      JOIN splitter_port sp ON sp.splitter_id = s.id
      LEFT JOIN network_client cl ON cl.id = sp.client_id
      WHERE s.node_id = ${req.params.id}::uuid
      ORDER BY s.position NULLS LAST, sp.port_number
    `)
  })
}
HEREDOC_EOF

# Write src/routes/cables.js (quoted heredoc: content is copied literally).
mkdir -p "$(dirname "$INSTALL_DIR/src/routes/cables.js")"
cat > "$INSTALL_DIR/src/routes/cables.js" << 'HEREDOC_EOF'
// src/routes/cables.js
import { z } from 'zod'
import { sql, withISP, cached, invalidate } from '../config/db.js'
import { requireAuth, requireRole } from '../middleware/auth.js'

// preHandler shortcuts: plain authentication vs. engineer-or-above.
const auth = [requireAuth]
const eng  = [requireAuth, requireRole('engineer')]

// GeoJSON LineString with at least two [lng, lat] vertices.
const lineStringSchema = z.object({
  type:        z.literal('LineString'),
  coordinates: z.array(z.tuple([z.number(), z.number()])).min(2),
})

// Payload for POST /cables. att_db_km is fiber attenuation in dB/km;
// level 1-3 is the cable hierarchy (backbone → distribution → drop).
const cableCreateSchema = z.object({
  name:        z.string().optional(),
  cable_type:  z.enum(['ADSS','AS','SUBTERRANEO','DROP_FIGURE8','INTERNO','DUTO']).default('AS'),
  fiber_count: z.number().int().positive(),
  level:       z.number().int().min(1).max(3).default(1),
  node_start:  z.string().uuid().optional(),
  node_end:    z.string().uuid().optional(),
  project_id:  z.string().uuid().optional(),
  path:        lineStringSchema,
  att_db_km:   z.number().min(0).max(2).default(0.35),
  color:       z.string().regex(/^#[0-9A-Fa-f]{6}$/).optional(),
  impl_state:  z.enum(['PLANEJADO','EM_IMPLANTACAO','IMPLANTADO','DESATIVADO']).default('PLANEJADO'),
  tags:        z.array(z.string()).default([]),
})

// PATCH /cables/:id accepts any subset of the creation fields.
const cableUpdateSchema = cableCreateSchema.partial()

/**
 * Convert a GeoJSON LineString into an EWKT string (SRID 4326) suitable for
 * a PostGIS geometry parameter.
 * @param {{type:'LineString', coordinates:[number,number][]}} geojson
 * @returns {string} e.g. "SRID=4326;LINESTRING(-46.6 -23.5, -46.7 -23.6)"
 */
function pathToWKT(geojson) {
  const pairs = []
  for (const vertex of geojson.coordinates) {
    pairs.push(vertex[0] + ' ' + vertex[1])
  }
  return 'SRID=4326;LINESTRING(' + pairs.join(', ') + ')'
}

/**
 * CRUD and spatial queries for fiber cables. Reads are cached per
 * (isp, query); writes invalidate both the tile and cable caches.
 */
export default async function cableRoutes(app) {

  // ── GET /cables ───────────────────────────────────────────────
  // Listing with optional bbox / level / project / status filters.
  app.get('/cables', { preHandler: auth }, async (req) => {
    const { bbox, level, project_id, impl_state, limit = 500, offset = 0 } = req.query
    const isp_id = req.isp_id

    const cacheKey = `cables:${isp_id}:${JSON.stringify(req.query)}`
    return cached(cacheKey, 30, () =>
      withISP(isp_id, async (tx) => {
        if (bbox) {
          const [lngMin, latMin, lngMax, latMax] = bbox.split(',').map(Number)
          return tx`
            SELECT
              c.id, c.name, c.cable_type, c.fiber_count, c.level,
              c.impl_state, c.att_db_km, c.color, c.tags,
              ROUND(c.length_m::NUMERIC, 1) AS length_m,
              c.node_start, c.node_end, c.project_id,
              ST_AsGeoJSON(c.path)::jsonb AS path
            FROM cable c
            WHERE c.isp_id = ${isp_id}
              AND c.path && ST_MakeEnvelope(${lngMin},${latMin},${lngMax},${latMax},4326)
              ${level      ? tx`AND c.level      = ${Number(level)}`                : tx``}
              ${project_id ? tx`AND c.project_id = ${project_id}::uuid`             : tx``}
              ${impl_state ? tx`AND c.impl_state = ${impl_state}::impl_state`       : tx``}
            ORDER BY c.level, c.updated_at DESC
            LIMIT ${Number(limit)} OFFSET ${Number(offset)}
          `
        }
        // Same projection without the bbox predicate.
        return tx`
          SELECT
            c.id, c.name, c.cable_type, c.fiber_count, c.level,
            c.impl_state, c.att_db_km, c.color, c.tags,
            ROUND(c.length_m::NUMERIC, 1) AS length_m,
            c.node_start, c.node_end, c.project_id,
            ST_AsGeoJSON(c.path)::jsonb AS path
          FROM cable c
          WHERE c.isp_id = ${isp_id}
            ${level      ? tx`AND c.level      = ${Number(level)}`          : tx``}
            ${project_id ? tx`AND c.project_id = ${project_id}::uuid`       : tx``}
            ${impl_state ? tx`AND c.impl_state = ${impl_state}::impl_state` : tx``}
          ORDER BY c.level, c.updated_at DESC
          LIMIT ${Number(limit)} OFFSET ${Number(offset)}
        `
      })
    )
  })

  // ── GET /cables/:id ───────────────────────────────────────────
  // Full cable detail, joined with its start/end nodes.
  // NOTE(review): '/cables/coverage' below relies on static routes taking
  // precedence over this ':id' route in Fastify's router — confirm.
  app.get('/cables/:id', { preHandler: auth }, async (req, reply) => {
    const [cable] = await withISP(req.isp_id, async (tx) => tx`
      SELECT
        c.*,
        ROUND(c.length_m::NUMERIC, 1) AS length_m,
        ST_AsGeoJSON(c.path)::jsonb AS path,
        ns.name AS node_start_name, ns.node_type AS node_start_type,
        ne.name AS node_end_name,   ne.node_type AS node_end_type
      FROM cable c
      LEFT JOIN optical_node ns ON ns.id = c.node_start
      LEFT JOIN optical_node ne ON ne.id = c.node_end
      WHERE c.id = ${req.params.id}::uuid AND c.isp_id = ${req.isp_id}
    `)
    if (!cable) return reply.code(404).send({ error: 'Cabo não encontrado' })
    return cable
  })

  // ── POST /cables ──────────────────────────────────────────────
  // The path is sent as GeoJSON and stored as geometry via an EWKT literal;
  // length_m is computed server-side (returned rounded).
  app.post('/cables', { preHandler: eng }, async (req, reply) => {
    const body = cableCreateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data
    const wkt = pathToWKT(d.path)

    const [cable] = await withISP(req.isp_id, async (tx) => tx`
      INSERT INTO cable (
        isp_id, project_id, node_start, node_end,
        name, cable_type, fiber_count, level,
        path, att_db_km, color, impl_state, tags
      ) VALUES (
        ${req.isp_id}, ${d.project_id ?? null},
        ${d.node_start ?? null}, ${d.node_end ?? null},
        ${d.name ?? null}, ${d.cable_type}::cable_type,
        ${d.fiber_count}, ${d.level},
        ${wkt}::geometry,
        ${d.att_db_km}, ${d.color ?? null},
        ${d.impl_state}::impl_state, ${d.tags}
      )
      RETURNING
        id, name, cable_type, fiber_count, level, impl_state,
        ROUND(length_m::NUMERIC, 1) AS length_m, att_db_km, color, tags,
        node_start, node_end,
        ST_AsGeoJSON(path)::jsonb AS path
    `)

    await invalidate(`tiles:${req.isp_id}`)
    await invalidate(`cables:${req.isp_id}`)
    return reply.code(201).send(cable)
  })

  // ── PATCH /cables/:id ─────────────────────────────────────────
  // Partial update: COALESCE keeps the stored value for fields not sent.
  app.patch('/cables/:id', { preHandler: eng }, async (req, reply) => {
    const body = cableUpdateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data
    const wkt = d.path ? pathToWKT(d.path) : undefined

    const [updated] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE cable SET
        name        = COALESCE(${d.name       ?? null}, name),
        cable_type  = COALESCE(${d.cable_type ? tx`${d.cable_type}::cable_type` : tx`cable_type`}),
        fiber_count = COALESCE(${d.fiber_count ?? null}, fiber_count),
        level       = COALESCE(${d.level      ?? null}, level),
        att_db_km   = COALESCE(${d.att_db_km  ?? null}, att_db_km),
        color       = COALESCE(${d.color      ?? null}, color),
        impl_state  = COALESCE(${d.impl_state ? tx`${d.impl_state}::impl_state` : tx`impl_state`}),
        tags        = COALESCE(${d.tags       ?? null}, tags),
        path        = COALESCE(${wkt ? tx`${wkt}::geometry` : tx`path`}),
        updated_at  = NOW()
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING
        id, name, cable_type, fiber_count, level, impl_state,
        ROUND(length_m::NUMERIC, 1) AS length_m,
        ST_AsGeoJSON(path)::jsonb AS path
    `)

    if (!updated) return reply.code(404).send({ error: 'Cabo não encontrado' })
    await invalidate(`tiles:${req.isp_id}`)
    await invalidate(`cables:${req.isp_id}`)
    return updated
  })

  // ── DELETE /cables/:id ────────────────────────────────────────
  // Hard delete; admin only.
  app.delete('/cables/:id', { preHandler: [requireAuth, requireRole('admin')] }, async (req, reply) => {
    const [deleted] = await withISP(req.isp_id, async (tx) => tx`
      DELETE FROM cable
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id
    `)
    if (!deleted) return reply.code(404).send({ error: 'Cabo não encontrado' })
    await invalidate(`tiles:${req.isp_id}`)
    await invalidate(`cables:${req.isp_id}`)
    return { deleted: deleted.id }
  })

  // ── GET /cables/coverage ──────────────────────────────────────
  // Approximate coverage polygon (convex hull of all deployed cables).
  app.get('/cables/coverage', { preHandler: auth }, async (req) => {
    const [result] = await withISP(req.isp_id, async (tx) => tx`
      SELECT ST_AsGeoJSON(
        ST_ConvexHull(ST_Collect(path))
      )::jsonb AS coverage
      FROM cable
      WHERE isp_id = ${req.isp_id}
        AND impl_state = 'IMPLANTADO'
    `)
    return result
  })
}
HEREDOC_EOF

# Write src/routes/clients.js (quoted heredoc: content is copied literally).
mkdir -p "$(dirname "$INSTALL_DIR/src/routes/clients.js")"
cat > "$INSTALL_DIR/src/routes/clients.js" << 'HEREDOC_EOF'
// src/routes/clients.js
import { z } from 'zod'
import { sql, withISP, cached, invalidate } from '../config/db.js'
import { requireAuth, requireRole } from '../middleware/auth.js'

// Reusable Fastify preHandler chains: plain authentication, and
// authentication combined with a role check.
const auth = [requireAuth]
const eng  = [requireAuth, requireRole('engineer')]
const tech = [requireAuth, requireRole('technician')]

// Payload schema for POST /clients. Only `name` is required; `location`
// is a plain {lat,lng} pair that the handler converts to EWKT for PostGIS.
// `erp_source` identifies which external ERP a synced record came from.
const clientCreateSchema = z.object({
  name:           z.string().min(1),
  document:       z.string().optional(),
  email:          z.string().email().optional(),
  phone:          z.string().optional(),
  address:        z.string().optional(),
  location:       z.object({ lat: z.number(), lng: z.number() }).optional(),
  project_id:     z.string().uuid().optional(),
  cto_id:         z.string().uuid().optional(),
  cto_port:       z.number().int().min(1).optional(),
  ont_serial:     z.string().optional(),
  pppoe_login:    z.string().optional(),
  plan_name:      z.string().optional(),
  plan_speed_down:z.number().int().optional(),
  plan_speed_up:  z.number().int().optional(),
  erp_id:         z.string().optional(),
  erp_source:     z.enum(['hubsoft','ixc','sgp','voalle','mk']).optional(),
  impl_state:     z.enum(['PLANEJADO','EM_IMPLANTACAO','IMPLANTADO','DESATIVADO']).default('PLANEJADO'),
  tags:           z.array(z.string()).default([]),
  metadata:       z.record(z.unknown()).default({}),
})

// PATCH /clients/:id payload: every create field, all optional.
const clientUpdateSchema = clientCreateSchema.partial()

// Payload schema for PATCH /clients/:id/signal (SmartOLT/Zabbix sync).
// `rx_power` is nullable so a poller can report an offline ONU (no optical
// reading). This matches the bulk-signal schema in this file and makes the
// handler's `rx_power === null → 'offline'` branch reachable; accepting
// null is a strict widening, so existing callers are unaffected.
const signalUpdateSchema = z.object({
  rx_power:      z.number().min(-50).max(10).nullable(),
  signal_status: z.enum(['ok','warning','critical','saturated','offline']).optional(),
  ont_serial:    z.string().optional(),
})

// Client (subscriber) routes: CRUD, map/bbox listing, optical-signal
// ingestion (single + bulk) and an alert feed. Every query runs through
// withISP() and repeats an explicit isp_id predicate; short Redis caches
// are invalidated after each mutation.
export default async function clientRoutes(app) {

  // ── GET /clients ──────────────────────────────────────────────
  // Paginated listing. With `bbox=lngMin,latMin,lngMax,latMax` a lighter
  // row set for map rendering is returned. Cached 20 s per ISP + exact
  // query string.
  // NOTE(review): the bbox branch does not apply the `erp_source` filter,
  // unlike the non-bbox branch — confirm whether that is intentional.
  app.get('/clients', { preHandler: auth }, async (req) => {
    const {
      bbox, cto_id, signal_status, impl_state,
      erp_source, search,
      limit = 100, offset = 0,
    } = req.query

    const cacheKey = `clients:${req.isp_id}:${JSON.stringify(req.query)}`
    return cached(cacheKey, 20, () =>
      withISP(req.isp_id, async (tx) => {
        if (bbox) {
          const [lngMin, latMin, lngMax, latMax] = bbox.split(',').map(Number)
          return tx`
            SELECT
              cl.id, cl.name, cl.pppoe_login, cl.plan_name,
              cl.ont_serial, cl.cto_id, cl.cto_port,
              cl.rx_power, cl.signal_status, cl.impl_state,
              cl.active, cl.erp_source, cl.erp_id, cl.tags,
              ST_AsGeoJSON(cl.location)::jsonb AS location,
              n.name AS cto_name
            FROM network_client cl
            LEFT JOIN optical_node n ON n.id = cl.cto_id
            WHERE cl.isp_id = ${req.isp_id}
              AND cl.location && ST_MakeEnvelope(${lngMin},${latMin},${lngMax},${latMax},4326)
              ${cto_id       ? tx`AND cl.cto_id        = ${cto_id}::uuid`                   : tx``}
              ${signal_status? tx`AND cl.signal_status = ${signal_status}::signal_status`    : tx``}
              ${impl_state   ? tx`AND cl.impl_state    = ${impl_state}::impl_state`          : tx``}
              ${search       ? tx`AND cl.name ILIKE ${'%'+search+'%'}`                       : tx``}
            ORDER BY cl.signal_status DESC NULLS LAST, cl.name
            LIMIT ${Number(limit)} OFFSET ${Number(offset)}
          `
        }
        return tx`
          SELECT
            cl.id, cl.name, cl.document, cl.email, cl.phone,
            cl.pppoe_login, cl.plan_name, cl.plan_speed_down,
            cl.ont_serial, cl.cto_id, cl.cto_port,
            cl.rx_power, cl.signal_status, cl.impl_state,
            cl.active, cl.erp_source, cl.erp_id, cl.tags,
            ST_AsGeoJSON(cl.location)::jsonb AS location,
            n.name AS cto_name
          FROM network_client cl
          LEFT JOIN optical_node n ON n.id = cl.cto_id
          WHERE cl.isp_id = ${req.isp_id}
            ${cto_id       ? tx`AND cl.cto_id        = ${cto_id}::uuid`                 : tx``}
            ${signal_status? tx`AND cl.signal_status = ${signal_status}::signal_status`  : tx``}
            ${impl_state   ? tx`AND cl.impl_state    = ${impl_state}::impl_state`        : tx``}
            ${erp_source   ? tx`AND cl.erp_source    = ${erp_source}`                   : tx``}
            ${search       ? tx`AND cl.name ILIKE ${'%'+search+'%'}`                     : tx``}
          ORDER BY cl.signal_status DESC NULLS LAST, cl.updated_at DESC
          LIMIT ${Number(limit)} OFFSET ${Number(offset)}
        `
      })
    )
  })

  // ── GET /clients/alerts ───────────────────────────────────────
  // Clients with critical or offline signal — feeds the dashboard.
  // Cached 15 s; offline rows sort before critical, then by weakest Rx.
  app.get('/clients/alerts', { preHandler: auth }, async (req) => {
    const cacheKey = `alerts:${req.isp_id}`
    return cached(cacheKey, 15, () =>
      withISP(req.isp_id, async (tx) => tx`
        SELECT
          cl.id, cl.name, cl.pppoe_login, cl.rx_power, cl.signal_status,
          cl.cto_id, cl.cto_port, cl.ont_serial,
          n.name AS cto_name,
          ST_AsGeoJSON(cl.location)::jsonb AS location
        FROM network_client cl
        LEFT JOIN optical_node n ON n.id = cl.cto_id
        WHERE cl.isp_id = ${req.isp_id}
          AND cl.active = true
          AND cl.signal_status IN ('critical','offline')
        ORDER BY
          CASE cl.signal_status WHEN 'offline' THEN 0 WHEN 'critical' THEN 1 ELSE 2 END,
          cl.rx_power ASC NULLS LAST
        LIMIT 100
      `)
    )
  })

  // ── GET /clients/:id ─────────────────────────────────────────
  // Full client record joined with its CTO and pole details.
  app.get('/clients/:id', { preHandler: auth }, async (req, reply) => {
    const [client] = await withISP(req.isp_id, async (tx) => tx`
      SELECT
        cl.*,
        ST_AsGeoJSON(cl.location)::jsonb AS location,
        n.name AS cto_name, n.node_type AS cto_type,
        ST_AsGeoJSON(n.location)::jsonb AS cto_location,
        p.code AS pole_code, p.address AS pole_address
      FROM network_client cl
      LEFT JOIN optical_node n ON n.id = cl.cto_id
      LEFT JOIN pole p         ON p.id = cl.pole_id
      WHERE cl.id = ${req.params.id}::uuid AND cl.isp_id = ${req.isp_id}
    `)
    if (!client) return reply.code(404).send({ error: 'Cliente não encontrado' })
    return client
  })

  // ── GET /clients/:id/budget ───────────────────────────────────
  // Optical link budget computed server-side by a SQL function.
  // NOTE(review): uses the top-level `sql` tag rather than withISP — confirm
  // calc_optical_budget() enforces ISP scoping internally.
  app.get('/clients/:id/budget', { preHandler: auth }, async (req) => {
    return sql`SELECT * FROM calc_optical_budget(${req.params.id}::uuid)`
  })

  // ── POST /clients ─────────────────────────────────────────────
  // Creates a client; `location`, when present, is sent as an EWKT point.
  app.post('/clients', { preHandler: eng }, async (req, reply) => {
    const body = clientCreateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data
    const point = d.location
      ? `SRID=4326;POINT(${d.location.lng} ${d.location.lat})`
      : null

    const [client] = await withISP(req.isp_id, async (tx) => tx`
      INSERT INTO network_client (
        isp_id, project_id, name, document, email, phone, address,
        location, cto_id, cto_port, ont_serial,
        pppoe_login, plan_name, plan_speed_down, plan_speed_up,
        erp_id, erp_source, impl_state, tags, metadata
      ) VALUES (
        ${req.isp_id}, ${d.project_id ?? null},
        ${d.name}, ${d.document ?? null}, ${d.email ?? null},
        ${d.phone ?? null}, ${d.address ?? null},
        ${point}, ${d.cto_id ?? null}, ${d.cto_port ?? null},
        ${d.ont_serial ?? null}, ${d.pppoe_login ?? null},
        ${d.plan_name ?? null}, ${d.plan_speed_down ?? null}, ${d.plan_speed_up ?? null},
        ${d.erp_id ?? null}, ${d.erp_source ?? null},
        ${d.impl_state}::impl_state, ${d.tags}, ${JSON.stringify(d.metadata)}
      )
      RETURNING *, ST_AsGeoJSON(location)::jsonb AS location
    `)

    await invalidate(`clients:${req.isp_id}`)
    await invalidate(`tiles:${req.isp_id}`)
    return reply.code(201).send(client)
  })

  // ── PATCH /clients/:id ────────────────────────────────────────
  // Partial update: omitted fields keep their current value via COALESCE,
  // which also means a field cannot be cleared to NULL through this route.
  // NOTE(review): the single-argument COALESCE(...) wrappers around the
  // conditional fragments are no-ops — the ternary already picks either
  // the new value or the current column.
  app.patch('/clients/:id', { preHandler: tech }, async (req, reply) => {
    const body = clientUpdateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data
    const point = d.location
      ? `SRID=4326;POINT(${d.location.lng} ${d.location.lat})`
      : undefined

    const [updated] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE network_client SET
        name            = COALESCE(${d.name           ?? null}, name),
        document        = COALESCE(${d.document       ?? null}, document),
        email           = COALESCE(${d.email          ?? null}, email),
        phone           = COALESCE(${d.phone          ?? null}, phone),
        address         = COALESCE(${d.address        ?? null}, address),
        cto_id          = COALESCE(${d.cto_id         ?? null}, cto_id),
        cto_port        = COALESCE(${d.cto_port       ?? null}, cto_port),
        ont_serial      = COALESCE(${d.ont_serial     ?? null}, ont_serial),
        pppoe_login     = COALESCE(${d.pppoe_login    ?? null}, pppoe_login),
        plan_name       = COALESCE(${d.plan_name      ?? null}, plan_name),
        impl_state      = COALESCE(${d.impl_state ? tx`${d.impl_state}::impl_state` : tx`impl_state`}),
        tags            = COALESCE(${d.tags           ?? null}, tags),
        location        = COALESCE(${point ? tx`${point}::geometry` : tx`location`}),
        updated_at      = NOW()
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING *, ST_AsGeoJSON(location)::jsonb AS location
    `)

    if (!updated) return reply.code(404).send({ error: 'Cliente não encontrado' })
    await invalidate(`clients:${req.isp_id}`)
    await invalidate(`tiles:${req.isp_id}`)
    return updated
  })

  // ── PATCH /clients/:id/signal ─────────────────────────────────
  // Used by the SmartOLT / Zabbix sync to push signal readings in
  // near-real-time.
  app.patch('/clients/:id/signal', { preHandler: tech }, async (req, reply) => {
    const body = signalUpdateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const { rx_power, signal_status, ont_serial } = body.data

    // Derive the status from rx_power when none is given explicitly:
    // null → offline, > -8 dBm → saturated, [-28, -8] → ok,
    // [-30, -28) → warning, below -30 → critical.
    // NOTE(review): signalUpdateSchema currently rejects a null rx_power,
    // which makes the 'offline' branch unreachable via this route —
    // consider allowing null as the bulk-signal schema does.
    const status = signal_status ?? (
      rx_power === null   ? 'offline'  :
      rx_power > -8       ? 'saturated':
      rx_power >= -28     ? 'ok'       :
      rx_power >= -30     ? 'warning'  : 'critical'
    )

    const [updated] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE network_client SET
        rx_power      = ${rx_power},
        signal_status = ${status}::signal_status,
        last_seen     = NOW(),
        ont_serial    = COALESCE(${ont_serial ?? null}, ont_serial),
        -- Se tinha "não implantado" e agora há sinal, atualiza automaticamente
        impl_state    = CASE
          WHEN impl_state = 'PLANEJADO' AND ${rx_power} IS NOT NULL
          THEN 'IMPLANTADO'::impl_state
          ELSE impl_state
        END,
        updated_at = NOW()
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id, rx_power, signal_status, impl_state
    `)

    if (!updated) return reply.code(404).send({ error: 'Cliente não encontrado' })

    // Invalidate alerts cache but not tiles (a signal change does not
    // alter geometry).
    await invalidate(`alerts:${req.isp_id}`)
    await invalidate(`clients:${req.isp_id}`)
    await invalidate(`budget:${req.params.id}`)

    return updated
  })

  // ── POST /clients/bulk-signal ─────────────────────────────────
  // Batch update from Zabbix/SmartOLT (up to 500 ONUs per call), matched
  // by ONT serial. Rows whose serial is unknown are silently skipped.
  // NOTE(review): updates run sequentially, one UPDATE per ONU — consider
  // batching if 500-row syncs become a bottleneck.
  app.post('/clients/bulk-signal', { preHandler: tech }, async (req, reply) => {
    const updates = z.array(z.object({
      ont_serial:    z.string(),
      rx_power:      z.number().nullable(),
      signal_status: z.enum(['ok','warning','critical','saturated','offline']).optional(),
    })).max(500).safeParse(req.body)

    if (!updates.success) return reply.code(400).send({ error: updates.error.flatten() })

    let updated = 0
    for (const u of updates.data) {
      // Same status derivation as PATCH /clients/:id/signal.
      const status = u.signal_status ?? (
        u.rx_power === null ? 'offline'  :
        u.rx_power > -8     ? 'saturated':
        u.rx_power >= -28   ? 'ok'       :
        u.rx_power >= -30   ? 'warning'  : 'critical'
      )

      const [r] = await withISP(req.isp_id, async (tx) => tx`
        UPDATE network_client SET
          rx_power = ${u.rx_power}, signal_status = ${status}::signal_status,
          last_seen = NOW(), updated_at = NOW()
        WHERE ont_serial = ${u.ont_serial} AND isp_id = ${req.isp_id}
        RETURNING id
      `)
      if (r) updated++
    }

    await invalidate(`alerts:${req.isp_id}`)
    await invalidate(`clients:${req.isp_id}`)

    return { updated, total: updates.data.length }
  })

  // ── DELETE /clients/:id ───────────────────────────────────────
  // Admin-only hard delete; refreshes client and tile caches.
  app.delete('/clients/:id', { preHandler: [requireAuth, requireRole('admin')] }, async (req, reply) => {
    const [deleted] = await withISP(req.isp_id, async (tx) => tx`
      DELETE FROM network_client
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id
    `)
    if (!deleted) return reply.code(404).send({ error: 'Cliente não encontrado' })
    await invalidate(`clients:${req.isp_id}`)
    await invalidate(`tiles:${req.isp_id}`)
    return { deleted: deleted.id }
  })
}
HEREDOC_EOF

# Generate src/routes/work_orders.js — work-order (OS) routes.
mkdir -p "$(dirname "$INSTALL_DIR/src/routes/work_orders.js")"
cat > "$INSTALL_DIR/src/routes/work_orders.js" << 'HEREDOC_EOF'
// src/routes/work_orders.js
import { z } from 'zod'
import { sql, withISP, cached, invalidate } from '../config/db.js'
import { requireAuth, requireRole } from '../middleware/auth.js'

// Reusable Fastify preHandler chains: plain authentication, and
// authentication combined with a role check.
const auth = [requireAuth]
const tech = [requireAuth, requireRole('technician')]
const eng  = [requireAuth, requireRole('engineer')]

// A single checklist entry stored in the work_order.checklist jsonb array.
const checkItemSchema = z.object({
  id:   z.string(),
  text: z.string(),
  done: z.boolean().default(false),
})

// Payload schema for POST /os. An empty `checklist` makes the handler fall
// back to the per-type default (see DEFAULT_CHECKLISTS).
const osCreateSchema = z.object({
  os_type:      z.enum(['instalacao','manutencao','suporte','vistoria','remocao']),
  title:        z.string().min(1),
  description:  z.string().optional(),
  client_id:    z.string().uuid().optional(),
  assigned_to:  z.string().uuid().optional(),
  address:      z.string().optional(),
  location:     z.object({ lat: z.number(), lng: z.number() }).optional(),
  scheduled_at: z.string().datetime().optional(),
  erp_id:       z.string().optional(),
  erp_source:   z.string().optional(),
  checklist:    z.array(checkItemSchema).default([]),
  metadata:     z.record(z.unknown()).default({}),
})

// PATCH /os/:id payload: all create fields optional, plus status
// transitions and free-form notes.
const osUpdateSchema = osCreateSchema.partial().extend({
  status: z.enum(['aberta','em_andamento','aguardando','concluida','cancelada']).optional(),
  notes:  z.string().optional(),
})

// Default checklist per OS (work order) type, used by POST /os when the
// caller sends no checklist. Item texts are user-facing pt-BR strings and
// must stay as-is.
const DEFAULT_CHECKLISTS = {
  instalacao: [
    { id: '1', text: 'Conferir endereço e dados do cliente', done: false },
    { id: '2', text: 'Medir e lançar cabo drop', done: false },
    { id: '3', text: 'Instalar e configurar ONU', done: false },
    { id: '4', text: 'Verificar sinal Rx (≥ −28 dBm)', done: false },
    { id: '5', text: 'Testar conexão PPPoE', done: false },
    { id: '6', text: 'Fotografar instalação concluída', done: false },
    { id: '7', text: 'Coletar assinatura do cliente', done: false },
  ],
  manutencao: [
    { id: '1', text: 'Identificar causa do problema com OTDR', done: false },
    { id: '2', text: 'Limpar conectores SC/APC', done: false },
    { id: '3', text: 'Executar reparo / troca de componente', done: false },
    { id: '4', text: 'Verificar sinal Rx após reparo', done: false },
    { id: '5', text: 'Testar conexão do cliente', done: false },
    { id: '6', text: 'Fotografar componente trocado', done: false },
  ],
  suporte: [
    { id: '1', text: 'Verificar sinal Rx na ONT', done: false },
    { id: '2', text: 'Verificar status PPPoE no rádio', done: false },
    { id: '3', text: 'Checar roteador do cliente', done: false },
    { id: '4', text: 'Testar velocidade final', done: false },
    { id: '5', text: 'Confirmar resolução com cliente', done: false },
  ],
  vistoria: [
    { id: '1', text: 'Vistoriar poste / infraestrutura', done: false },
    { id: '2', text: 'Fotografar irregularidades', done: false },
    { id: '3', text: 'Registrar ocorrências', done: false },
  ],
  remocao: [
    { id: '1', text: 'Recolher ONU e equipamentos', done: false },
    { id: '2', text: 'Liberar porta na CTO', done: false },
    { id: '3', text: 'Registrar saída no sistema', done: false },
  ],
}

// Work-order (OS) routes: role-scoped listing, CRUD, per-item checklist
// updates and an offline sync endpoint for the mobile app. Mutations
// invalidate the `os:<isp>` cache namespace.
export default async function workOrderRoutes(app) {

  // ── GET /os ───────────────────────────────────────────────────
  // Filterable, paginated listing ordered by status urgency, then by
  // schedule. Cached 10 s per ISP + query string + requesting user.
  app.get('/os', { preHandler: auth }, async (req) => {
    const {
      status, os_type, assigned_to,
      client_id, erp_source,
      date_from, date_to,
      limit = 50, offset = 0,
    } = req.query

    // Technicians only see their own OS; other roles see all (or may
    // filter by assignee).
    const filterUser = req.user.role === 'technician' ? req.user.id : (assigned_to ?? null)

    return cached(`os:${req.isp_id}:${JSON.stringify(req.query)}:${req.user.id}`, 10, () =>
      withISP(req.isp_id, async (tx) => tx`
        SELECT
          wo.id, wo.os_type, wo.status, wo.title, wo.description,
          wo.address, wo.scheduled_at, wo.started_at, wo.finished_at,
          wo.erp_id, wo.erp_source, wo.created_at, wo.updated_at,
          ST_AsGeoJSON(wo.location)::jsonb AS location,
          cl.name AS client_name, cl.pppoe_login, cl.rx_power, cl.signal_status,
          u.name AS assigned_name, u.email AS assigned_email
        FROM work_order wo
        LEFT JOIN network_client cl ON cl.id = wo.client_id
        LEFT JOIN app_user u        ON u.id  = wo.assigned_to
        WHERE wo.isp_id = ${req.isp_id}
          ${filterUser   ? tx`AND wo.assigned_to = ${filterUser}::uuid`             : tx``}
          ${status       ? tx`AND wo.status   = ${status}::os_status`               : tx``}
          ${os_type      ? tx`AND wo.os_type  = ${os_type}::os_type`                : tx``}
          ${client_id    ? tx`AND wo.client_id = ${client_id}::uuid`                : tx``}
          ${erp_source   ? tx`AND wo.erp_source = ${erp_source}`                    : tx``}
          ${date_from    ? tx`AND wo.scheduled_at >= ${date_from}::timestamptz`     : tx``}
          ${date_to      ? tx`AND wo.scheduled_at <= ${date_to}::timestamptz`       : tx``}
        ORDER BY
          CASE wo.status
            WHEN 'em_andamento' THEN 0
            WHEN 'aberta'       THEN 1
            WHEN 'aguardando'   THEN 2
            ELSE 3
          END,
          wo.scheduled_at ASC NULLS LAST,
          wo.created_at DESC
        LIMIT ${Number(limit)} OFFSET ${Number(offset)}
      `)
    )
  })

  // ── GET /os/:id ───────────────────────────────────────────────
  // Full OS detail joined with client, CTO, assignee and creator.
  app.get('/os/:id', { preHandler: auth }, async (req, reply) => {
    const [os] = await withISP(req.isp_id, async (tx) => tx`
      SELECT
        wo.*,
        ST_AsGeoJSON(wo.location)::jsonb AS location,
        cl.name AS client_name, cl.document, cl.phone, cl.address AS client_address,
        cl.pppoe_login, cl.plan_name, cl.rx_power, cl.signal_status,
        cl.cto_id, cl.cto_port, cl.ont_serial,
        ST_AsGeoJSON(cl.location)::jsonb AS client_location,
        n.name AS cto_name,
        u.name AS assigned_name, u.email AS assigned_email, u.role AS assigned_role,
        cr.name AS created_by_name
      FROM work_order wo
      LEFT JOIN network_client cl ON cl.id  = wo.client_id
      LEFT JOIN optical_node   n  ON n.id   = cl.cto_id
      LEFT JOIN app_user       u  ON u.id   = wo.assigned_to
      LEFT JOIN app_user       cr ON cr.id  = wo.created_by
      WHERE wo.id = ${req.params.id}::uuid AND wo.isp_id = ${req.isp_id}
    `)
    if (!os) return reply.code(404).send({ error: 'OS não encontrada' })
    return os
  })

  // ── POST /os ──────────────────────────────────────────────────
  // Creates an OS in status 'aberta', recording the requester as creator.
  app.post('/os', { preHandler: tech }, async (req, reply) => {
    const body = osCreateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data
    // EWKT point for PostGIS, or NULL when no coordinates were given.
    const point = d.location
      ? `SRID=4326;POINT(${d.location.lng} ${d.location.lat})`
      : null

    // Use the caller's checklist, or the default one for the OS type.
    const checklist = d.checklist.length > 0
      ? d.checklist
      : (DEFAULT_CHECKLISTS[d.os_type] ?? [])

    const [os] = await withISP(req.isp_id, async (tx) => tx`
      INSERT INTO work_order (
        isp_id, client_id, assigned_to, created_by,
        os_type, status, title, description,
        address, location, scheduled_at,
        erp_id, erp_source, checklist, metadata
      ) VALUES (
        ${req.isp_id},
        ${d.client_id     ?? null}, ${d.assigned_to   ?? null}, ${req.user.id},
        ${d.os_type}::os_type, 'aberta'::os_status,
        ${d.title}, ${d.description ?? null},
        ${d.address ?? null}, ${point},
        ${d.scheduled_at ?? null},
        ${d.erp_id ?? null}, ${d.erp_source ?? null},
        ${JSON.stringify(checklist)}, ${JSON.stringify(d.metadata)}
      )
      RETURNING *, ST_AsGeoJSON(location)::jsonb AS location
    `)

    await invalidate(`os:${req.isp_id}`)
    return reply.code(201).send(os)
  })

  // ── PATCH /os/:id ─────────────────────────────────────────────
  // Partial update; omitted fields keep their current value (COALESCE),
  // so a field cannot be cleared to NULL through this route.
  app.patch('/os/:id', { preHandler: tech }, async (req, reply) => {
    const body = osUpdateSchema.safeParse(req.body)
    if (!body.success) return reply.code(400).send({ error: body.error.flatten() })

    const d = body.data

    // Automatic timestamps on status transitions: entering 'em_andamento'
    // stamps started_at; 'concluida'/'cancelada' stamp finished_at.
    // NOTE(review): these fragments are built with the top-level `sql` tag
    // while the query below uses `tx` — confirm the DB client accepts
    // mixing fragments from both tags inside one query.
    const startedAt  = d.status === 'em_andamento' ? sql`NOW()` : sql`started_at`
    const finishedAt = d.status === 'concluida' || d.status === 'cancelada'
      ? sql`NOW()` : sql`finished_at`

    const point = d.location
      ? `SRID=4326;POINT(${d.location.lng} ${d.location.lat})`
      : undefined

    const [updated] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE work_order SET
        status       = COALESCE(${d.status      ? tx`${d.status}::os_status` : tx`status`}),
        title        = COALESCE(${d.title       ?? null}, title),
        description  = COALESCE(${d.description ?? null}, description),
        assigned_to  = COALESCE(${d.assigned_to ?? null}, assigned_to),
        address      = COALESCE(${d.address     ?? null}, address),
        notes        = COALESCE(${d.notes       ?? null}, notes),
        checklist    = COALESCE(${d.checklist   ? JSON.stringify(d.checklist) : null}::jsonb, checklist),
        scheduled_at = COALESCE(${d.scheduled_at ?? null}, scheduled_at),
        location     = COALESCE(${point ? tx`${point}::geometry` : tx`location`}),
        started_at   = ${startedAt},
        finished_at  = ${finishedAt},
        updated_at   = NOW()
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING *, ST_AsGeoJSON(location)::jsonb AS location
    `)

    if (!updated) return reply.code(404).send({ error: 'OS não encontrada' })
    await invalidate(`os:${req.isp_id}`)
    return updated
  })

  // ── PATCH /os/:id/checklist/:item_id ──────────────────────────
  // Toggles a single checklist item (used by the offline-first mobile
  // app). Rewrites the jsonb array, flipping `done` on the matching id.
  app.patch('/os/:id/checklist/:item_id', { preHandler: tech }, async (req, reply) => {
    const { done } = req.body ?? {}
    if (typeof done !== 'boolean')
      return reply.code(400).send({ error: '"done" deve ser boolean' })

    const [updated] = await withISP(req.isp_id, async (tx) => tx`
      UPDATE work_order
      SET
        checklist = (
          SELECT jsonb_agg(
            CASE WHEN item->>'id' = ${req.params.item_id}
              THEN jsonb_set(item, '{done}', ${done}::text::jsonb)
              ELSE item
            END
          )
          FROM jsonb_array_elements(checklist) AS item
        ),
        updated_at = NOW()
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id, checklist
    `)

    if (!updated) return reply.code(404).send({ error: 'OS não encontrada' })
    await invalidate(`os:${req.isp_id}`)
    return updated
  })

  // ── POST /os/sync-offline ─────────────────────────────────────
  // The mobile app sends a batch of updates made while offline. Each item
  // is applied independently; a per-item try/catch keeps one bad row from
  // failing the whole batch.
  app.post('/os/sync-offline', { preHandler: tech }, async (req, reply) => {
    const batch = z.array(z.object({
      id:        z.string().uuid(),
      status:    z.enum(['aberta','em_andamento','aguardando','concluida','cancelada']).optional(),
      checklist: z.array(checkItemSchema).optional(),
      notes:     z.string().optional(),
      finished_at: z.string().datetime().optional(),
      attachments: z.array(z.object({ url: z.string(), caption: z.string().optional() })).optional(),
    })).max(50).safeParse(req.body)

    if (!batch.success) return reply.code(400).send({ error: batch.error.flatten() })

    const results = []
    for (const update of batch.data) {
      try {
        const [r] = await withISP(req.isp_id, async (tx) => tx`
          UPDATE work_order SET
            status      = COALESCE(${update.status ? tx`${update.status}::os_status` : tx`status`}),
            checklist   = COALESCE(${update.checklist ? JSON.stringify(update.checklist) : null}::jsonb, checklist),
            notes       = COALESCE(${update.notes ?? null}, notes),
            attachments = COALESCE(${update.attachments ? JSON.stringify(update.attachments) : null}::jsonb, attachments),
            finished_at = COALESCE(${update.finished_at ?? null}, finished_at),
            updated_at  = NOW()
          WHERE id = ${update.id}::uuid AND isp_id = ${req.isp_id}
          RETURNING id, status, updated_at
        `)
        results.push({ id: update.id, ok: true, ...(r ?? {}) })
      } catch (err) {
        results.push({ id: update.id, ok: false, error: err.message })
      }
    }

    await invalidate(`os:${req.isp_id}`)
    return { synced: results.filter(r => r.ok).length, results }
  })

  // ── DELETE /os/:id ────────────────────────────────────────────
  // Admin-only hard delete.
  app.delete('/os/:id', { preHandler: [requireAuth, requireRole('admin')] }, async (req, reply) => {
    const [deleted] = await withISP(req.isp_id, async (tx) => tx`
      DELETE FROM work_order
      WHERE id = ${req.params.id}::uuid AND isp_id = ${req.isp_id}
      RETURNING id
    `)
    if (!deleted) return reply.code(404).send({ error: 'OS não encontrada' })
    await invalidate(`os:${req.isp_id}`)
    return { deleted: deleted.id }
  })

  // ── GET /os/stats ─────────────────────────────────────────────
  // Dashboard counters: open/in-progress/waiting totals, completions in
  // the last 24 h, and average duration (minutes) over the last 7 days.
  // NOTE(review): registered after /os/:id — Fastify's router prefers
  // static segments over parametric ones, so this should still match;
  // verify.
  app.get('/os/stats', { preHandler: auth }, async (req) => {
    const [stats] = await withISP(req.isp_id, async (tx) => tx`
      SELECT
        COUNT(*) FILTER (WHERE status = 'aberta')       AS abertas,
        COUNT(*) FILTER (WHERE status = 'em_andamento') AS em_andamento,
        COUNT(*) FILTER (WHERE status = 'aguardando')   AS aguardando,
        COUNT(*) FILTER (WHERE status = 'concluida'
          AND finished_at >= NOW() - INTERVAL '24h')    AS concluidas_hoje,
        AVG(
          EXTRACT(EPOCH FROM (finished_at - started_at)) / 60
        ) FILTER (WHERE status = 'concluida'
          AND finished_at >= NOW() - INTERVAL '7d')     AS avg_duration_min
      FROM work_order
      WHERE isp_id = ${req.isp_id}
    `)
    return stats
  })
}
HEREDOC_EOF

# Generate src/routes/tiles.js — Mapbox Vector Tile serving + cache routes.
mkdir -p "$(dirname "$INSTALL_DIR/src/routes/tiles.js")"
cat > "$INSTALL_DIR/src/routes/tiles.js" << 'HEREDOC_EOF'
// src/routes/tiles.js
import { sql, redis } from '../config/db.js'
import { requireAuth } from '../middleware/auth.js'

// TTL do cache por nível de zoom (tiles mais distantes mudam menos)
function tileTTL(z) {
  if (z <= 12) return 300  // 5 min — backbone raramente muda
  if (z <= 15) return 120  // 2 min
  return 30                // 30s — zoom alto, clientes mudam mais
}

// Vector-tile routes: serve MVT tiles generated by PostGIS with a Redis
// byte cache, plus a targeted cache-invalidation endpoint.
export default async function tileRoutes(app) {

  // ── GET /tiles/:z/:x/:y.mvt ──────────────────────────────────
  // Serves a Mapbox Vector Tile for the authenticated ISP, cached per
  // ISP/project/z/x/y with a zoom-dependent TTL (see tileTTL).
  app.get('/tiles/:z/:x/:y.mvt', { preHandler: [requireAuth] }, async (req, reply) => {
    const { z, x, y } = req.params
    const { project_id } = req.query
    const isp_id = req.isp_id

    const cacheKey = `tiles:${isp_id}:${project_id ?? 'all'}:${z}/${x}/${y}`

    // Cache first — getBuffer keeps the MVT as raw bytes (ioredis API).
    const cached = await redis.getBuffer(cacheKey)
    if (cached) {
      return reply
        .code(200)
        .header('Content-Type', 'application/x-protobuf')
        .header('X-Cache', 'HIT')
        .header('Cache-Control', `public, max-age=${tileTTL(Number(z))}`)
        .send(cached)
    }

    // Generate the tile via PostGIS.
    const [result] = await sql`
      SELECT get_mvt_tile(
        ${Number(z)}, ${Number(x)}, ${Number(y)},
        ${isp_id}::uuid,
        ${project_id ?? null}
      ) AS tile
    `

    const tile = result?.tile

    if (!tile || tile.length === 0) {
      // Empty tile — 204 so the client does not retry.
      return reply.code(204).send()
    }

    // Store in the cache. Fix: ioredis exposes `setex` (lowercase), not
    // node-redis's `setEx` — the original call would throw on every cache
    // miss, consistent with the `getBuffer` (ioredis) call above.
    await redis.setex(cacheKey, tileTTL(Number(z)), tile)

    return reply
      .code(200)
      .header('Content-Type', 'application/x-protobuf')
      .header('X-Cache', 'MISS')
      .header('Cache-Control', `public, max-age=${tileTTL(Number(z))}`)
      .send(Buffer.from(tile))
  })

  // ── POST /tiles/invalidate ───────────────────────────────────
  // Called internally after mutations (create/move/delete elements).
  // With lat/lng: invalidates only tiles around the edited area; without
  // coordinates: drops every cached tile for the ISP.
  app.post('/tiles/invalidate', { preHandler: [requireAuth] }, async (req, reply) => {
    const { lat, lng, radius_tiles = 3 } = req.body ?? {}
    const isp_id = req.isp_id

    if (lat !== undefined && lng !== undefined) {
      // Clamp the caller-supplied radius: pattern count grows as
      // 11 * (2r+1)^2, so an unbounded body value could flood Redis.
      const radius = Math.min(10, Math.max(0, Number(radius_tiles) || 0))
      const affected = []
      for (let z = 10; z <= 20; z++) {
        const [tx, ty] = lngLatToTile(Number(lng), Number(lat), z)
        for (let dx = -radius; dx <= radius; dx++) {
          for (let dy = -radius; dy <= radius; dy++) {
            affected.push(`tiles:${isp_id}:*:${z}/${tx+dx}/${ty+dy}`)
          }
        }
      }
      // Redis SCAN-free pattern delete.
      // NOTE(review): KEYS is O(N) and blocks Redis; prefer SCAN if the
      // keyspace grows large.
      for (const pattern of affected) {
        const keys = await redis.keys(pattern)
        if (keys.length) await redis.del(keys)
      }
      return { invalidated: affected.length }
    }

    // No coordinates: invalidate every tile of this ISP.
    const keys = await redis.keys(`tiles:${isp_id}:*`)
    if (keys.length) await redis.del(keys)
    return { invalidated: keys.length }
  })
}

// Conversão lng/lat → tile XY
function lngLatToTile(lng, lat, z) {
  const n = Math.pow(2, z)
  const x = Math.floor((lng + 180) / 360 * n)
  const latRad = lat * Math.PI / 180
  const y = Math.floor((1 - Math.log(Math.tan(latRad) + 1 / Math.cos(latRad)) / Math.PI) / 2 * n)
  return [x, y]
}
HEREDOC_EOF

# Generate src/routes/monitoring.js — fleet signal monitoring routes.
mkdir -p "$(dirname "$INSTALL_DIR/src/routes/monitoring.js")"
cat > "$INSTALL_DIR/src/routes/monitoring.js" << 'HEREDOC_EOF'
// src/routes/monitoring.js
import { z } from 'zod'
import { sql } from '../config/db.js'
import { requireAuth, requireRole } from '../middleware/auth.js'
import { can } from '../middleware/permissions.js'
import { syncZabbix, syncSmartOLT } from '../services/monitoring.js'

// preHandler chains: `adm` requires the admin role; `auth` layers a
// fine-grained 'monitoring'/'read' permission check over authentication.
const adm  = [requireAuth, requireRole('admin')]
const auth = [requireAuth, can('monitoring', 'read')]

// Fastify plugin: signal-monitoring endpoints plus the Zabbix / SmartOLT
// integration hooks. Every query is scoped to the caller's tenant via
// req.isp_id (set by requireAuth).
export default async function monitoringRoutes(app) {

  // ── GET /monitoring/summary ───────────────────────────────
  // Fleet-wide counters per signal_status for this ISP's active clients,
  // plus average/worst RX power and the most recent reading timestamp.
  app.get('/monitoring/summary', { preHandler: [requireAuth] }, async (req) => {
    const isp_id = req.isp_id
    const [stats] = await sql`
      SELECT
        COUNT(*) FILTER (WHERE signal_status = 'ok')        AS online,
        COUNT(*) FILTER (WHERE signal_status = 'offline')   AS offline,
        COUNT(*) FILTER (WHERE signal_status = 'critical')  AS critical,
        COUNT(*) FILTER (WHERE signal_status = 'warning')   AS warning,
        COUNT(*) FILTER (WHERE signal_status = 'saturated') AS saturated,
        COUNT(*) FILTER (WHERE signal_status IS NULL)       AS unknown,
        COUNT(*)                                            AS total,
        ROUND(AVG(rx_power)::NUMERIC, 2)                   AS avg_rx_dbm,
        ROUND(MIN(rx_power)::NUMERIC, 2)                   AS min_rx_dbm,
        MAX(last_seen)                                      AS last_update
      FROM network_client
      WHERE isp_id = ${isp_id} AND active = true
    `
    return stats
  })

  // ── GET /monitoring/by-olt ────────────────────────────────
  // Per-OLT client/alert breakdown, worst OLT (most alerts) first.
  // NOTE(review): the self-join matches CTOs that are *direct* children of
  // the OLT (cto.parent_id = n.id) or the OLT row itself; clients hanging off
  // deeper hierarchies (OLT → splitter → CTO) would not be counted — confirm
  // the topology is at most one level deep.
  app.get('/monitoring/by-olt', { preHandler: [requireAuth] }, async (req) => {
    return sql`
      SELECT
        n.id AS olt_id, n.name AS olt_name,
        COUNT(cl.id)                                               AS total_clients,
        COUNT(cl.id) FILTER (WHERE cl.signal_status = 'ok')       AS online,
        COUNT(cl.id) FILTER (WHERE cl.signal_status = 'offline')  AS offline,
        COUNT(cl.id) FILTER (WHERE cl.signal_status IN ('critical','warning')) AS alerts,
        ROUND(AVG(cl.rx_power)::NUMERIC, 2)                       AS avg_rx_dbm
      FROM optical_node n
      JOIN optical_node cto ON cto.parent_id = n.id OR cto.id = n.id
      JOIN network_client cl ON cl.cto_id = cto.id
      WHERE n.isp_id = ${req.isp_id} AND n.node_type = 'OLT'
      GROUP BY n.id, n.name
      ORDER BY alerts DESC, n.name
    `
  })

  // ── GET /monitoring/alerts ─────────────────────────────────
  // Lists clients in a bad state. ?severity=critical → critical+offline only;
  // ?severity=warning → warning only; anything else → all three. The severity
  // choice is spliced as a nested sql`` fragment, so it is not injectable.
  // NOTE(review): `limit` is taken from the query string unvalidated —
  // Number('abc') is NaN, which would make LIMIT fail; consider clamping.
  app.get('/monitoring/alerts', { preHandler: [requireAuth] }, async (req) => {
    const { severity, limit = 50 } = req.query
    return sql`
      SELECT
        cl.id, cl.name, cl.pppoe_login, cl.ont_serial,
        cl.rx_power, cl.signal_status, cl.last_seen,
        cl.cto_id, cl.cto_port,
        n.name AS cto_name,
        ST_AsGeoJSON(cl.location)::jsonb AS location
      FROM network_client cl
      LEFT JOIN optical_node n ON n.id = cl.cto_id
      WHERE cl.isp_id = ${req.isp_id}
        AND cl.active = true
        AND cl.signal_status IN (
          ${severity === 'critical'  ? sql`'critical','offline'` :
            severity === 'warning'  ? sql`'warning'`             :
            sql`'critical','offline','warning'`}
        )
      ORDER BY
        CASE cl.signal_status
          WHEN 'offline'  THEN 0
          WHEN 'critical' THEN 1
          WHEN 'warning'  THEN 2 ELSE 3
        END,
        cl.rx_power ASC NULLS LAST
      LIMIT ${Number(limit)}
    `
  })

  // ── POST /monitoring/zabbix/config ────────────────────────
  // Stores Zabbix credentials in isp.settings.zabbix (admin only).
  app.post('/monitoring/zabbix/config', { preHandler: adm }, async (req, reply) => {
    const { url, login, password, status_item, power_item } = req.body ?? {}
    if (!url || !login || !password)
      return reply.code(400).send({ error: 'url, login e password obrigatórios' })

    await sql`
      UPDATE isp SET
        settings = jsonb_set(COALESCE(settings,'{}'), '{zabbix}',
          ${JSON.stringify({ url, login, password, status_item, power_item })}::jsonb)
      WHERE id = ${req.isp_id}
    `
    return { ok: true }
  })

  // ── POST /monitoring/zabbix/sync ──────────────────────────
  // Triggers a synchronous Zabbix pull; upstream failures surface as 502.
  app.post('/monitoring/zabbix/sync', { preHandler: adm }, async (req, reply) => {
    try {
      return await syncZabbix(req.isp_id)
    } catch (err) {
      return reply.code(502).send({ error: err.message })
    }
  })

  // ── POST /monitoring/smartolt/config ──────────────────────
  // Stores SmartOLT credentials in isp.settings.smartolt (admin only).
  app.post('/monitoring/smartolt/config', { preHandler: adm }, async (req, reply) => {
    const { url, api_key } = req.body ?? {}
    if (!url || !api_key) return reply.code(400).send({ error: 'url e api_key obrigatórios' })

    await sql`
      UPDATE isp SET
        settings = jsonb_set(COALESCE(settings,'{}'), '{smartolt}',
          ${JSON.stringify({ url, api_key })}::jsonb)
      WHERE id = ${req.isp_id}
    `
    return { ok: true }
  })

  // ── POST /monitoring/smartolt/sync ────────────────────────
  // Triggers a synchronous SmartOLT pull; upstream failures surface as 502.
  app.post('/monitoring/smartolt/sync', { preHandler: adm }, async (req, reply) => {
    try {
      return await syncSmartOLT(req.isp_id)
    } catch (err) {
      return reply.code(502).send({ error: err.message })
    }
  })

  // ── POST /monitoring/bulk-signal ──────────────────────────
  // Batch ingest endpoint for an external SNMP poller. Authenticated by a
  // shared secret header (x-monitor-key), NOT a user JWT; the tenant comes
  // from the x-isp-id header. At most 1000 rows are processed per request.
  app.post('/monitoring/bulk-signal', {
    preHandler: [(req, reply) => {
      const key = req.headers['x-monitor-key']
      if (!key || key !== process.env.MONITOR_SECRET_KEY)
        return reply.code(401).send({ error: 'Monitor key inválida' })
    }]
  }, async (req) => {
    const updates = req.body ?? []
    const isp_id = req.headers['x-isp-id']
    // NOTE(review): validation failure returns an error body with HTTP 200.
    if (!isp_id || !Array.isArray(updates)) return { error: 'isp_id e array obrigatórios' }

    let updated = 0
    for (const u of updates.slice(0, 1000)) {
      // Derive status from RX power when the poller didn't classify it:
      //   null → offline, > -8 dBm → saturated, -28..-8 → ok,
      //   -30..-28 → warning, below -30 → critical.
      // NOTE(review): if rx_power is *absent* (undefined, not null), all the
      // comparisons are false and the row lands on 'critical' — confirm the
      // poller always sends the field explicitly.
      const status = u.signal_status ?? (
        u.rx_power === null ? 'offline' :
        u.rx_power > -8     ? 'saturated' :
        u.rx_power >= -28   ? 'ok' :
        u.rx_power >= -30   ? 'warning' : 'critical'
      )
      // One UPDATE per row, matched by ONT serial within the tenant.
      const [r] = await sql`
        UPDATE network_client SET
          rx_power = ${u.rx_power ?? null}, signal_status = ${status}::signal_status,
          last_seen = NOW(), updated_at = NOW()
        WHERE isp_id = ${isp_id} AND ont_serial = ${u.serial}
        RETURNING id
      `
      if (r) updated++
    }
    return { updated, total: updates.length }
  })
}
HEREDOC_EOF

# Write the Hubsoft ERP integration module (quoted heredoc: no shell expansion).
mkdir -p "$(dirname "$INSTALL_DIR/src/routes/integrations/hubsoft.js")"
cat > "$INSTALL_DIR/src/routes/integrations/hubsoft.js" << 'HEREDOC_EOF'
// src/routes/integrations/hubsoft.js
import { sql, cached, invalidate } from '../../config/db.js'
import { requireAuth, requireRole } from '../../middleware/auth.js'

// Guard chains shared by the routes below: plain authentication vs admin-only.
const auth = [requireAuth]
const adm  = [requireAuth, requireRole('admin')]

// ── Hubsoft OAuth2 helper ─────────────────────────────────────
// Resolves an OAuth2 password-grant access token for the Hubsoft ERP in
// `cfg` ({ isp_id, url, client_id, client_secret, username, password }),
// caching it in Redis per ISP. Throws on HTTP failure.
async function getHubsoftToken(cfg) {
  const cacheKey = `hubsoft_token:${cfg.isp_id}`

  // Reuse a cached token when available. The local is deliberately NOT named
  // `cached` — that would shadow the `cached` helper imported from
  // ../../config/db.js at module scope.
  const { redis } = await import('../../config/db.js')
  const cachedToken = await redis.get(cacheKey)
  if (cachedToken) return cachedToken

  const res = await fetch(`${cfg.url}/oauth/token`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      grant_type:    'password',
      client_id:     cfg.client_id,
      client_secret: cfg.client_secret,
      username:      cfg.username,
      password:      cfg.password,
    }),
  })

  if (!res.ok) throw new Error(`Hubsoft auth falhou: ${res.status}`)
  const { access_token, expires_in } = await res.json()

  // Cache with a 60 s safety margin. Guard the TTL: if Hubsoft ever returns
  // expires_in <= 60, redis.setEx would throw on a zero/negative expiry —
  // in that case skip caching and just return the short-lived token.
  const ttl = (expires_in ?? 3600) - 60
  if (ttl > 0) await redis.setEx(cacheKey, ttl, access_token)
  return access_token
}

// Authenticated JSON request against the Hubsoft API. Caller-supplied fetch
// options/headers are merged on top of the defaults; non-2xx responses throw.
async function hubsoftFetch(cfg, path, opts = {}) {
  const token = await getHubsoftToken(cfg)
  const headers = {
    Authorization: `Bearer ${token}`,
    'Content-Type': 'application/json',
    ...opts.headers,
  }
  const response = await fetch(`${cfg.url}${path}`, { ...opts, headers })
  if (!response.ok) throw new Error(`Hubsoft API ${path}: ${response.status}`)
  return response.json()
}

// ── Loads this ISP's Hubsoft settings from the database ───────
// Throws when the integration was never configured (missing url/client_id).
async function getHubsoftConfig(isp_id) {
  const rows = await sql`SELECT settings FROM isp WHERE id = ${isp_id}`
  const cfg = rows[0]?.settings?.hubsoft
  if (!cfg?.url || !cfg?.client_id) throw new Error('Hubsoft não configurado para este ISP')
  return { ...cfg, isp_id }
}

// ── Hubsoft → LiberMap client sync ───────────────────────────
// Pulls every *active* customer from the Hubsoft ERP (100 per page) and
// upserts them into network_client, keyed by (isp_id, erp_source, erp_id).
// Row-level failures are collected in `log` and never abort the run.
// Returns { imported, updated, errors, pages, log }.
// NOTE(review): one SELECT + one UPDATE/INSERT per customer (N+1) — fine for
// small bases, consider a bulk upsert for large ISPs.
export async function syncHubsoftClients(isp_id) {
  const cfg = await getHubsoftConfig(isp_id)
  const log = []
  let page = 1, imported = 0, updated = 0, errors = 0

  while (true) {
    // Pagination relies on Hubsoft's `last_page` field in each response.
    const { data, last_page } = await hubsoftFetch(cfg, `/api/v1/clientes?page=${page}&per_page=100&status=ativo`)

    for (const c of data) {
      try {
        const lat = Number(c.endereco?.latitude)
        const lng = Number(c.endereco?.longitude)
        // Falsy check already rejects 0 and NaN (Number(undefined) → NaN);
        // the explicit !== 0 comparisons are belt-and-braces.
        const hasGeo = lat && lng && lat !== 0 && lng !== 0

        const [existing] = await sql`
          SELECT id FROM network_client
          WHERE isp_id = ${isp_id} AND erp_source = 'hubsoft' AND erp_id = ${String(c.id)}
        `

        if (existing) {
          // Known client: refresh the ERP-owned fields (EWKT string for the
          // PostGIS point; null clears the location when no geodata).
          await sql`
            UPDATE network_client SET
              name        = ${c.nome},
              document    = ${c.cpf_cnpj ?? null},
              email       = ${c.email ?? null},
              phone       = ${c.telefone ?? null},
              plan_name   = ${c.contrato?.plano?.nome ?? null},
              pppoe_login = ${c.pppoe_login ?? null},
              ont_serial  = ${c.onu_serial ?? null},
              active      = ${c.status === 'ativo'},
              location    = ${hasGeo ? `SRID=4326;POINT(${lng} ${lat})` : null},
              updated_at  = NOW()
            WHERE id = ${existing.id}
          `
          updated++
        } else {
          // New client: insert as already-deployed (IMPLANTADO).
          await sql`
            INSERT INTO network_client (
              isp_id, name, document, email, phone,
              plan_name, pppoe_login, ont_serial,
              erp_id, erp_source, active,
              location, address, impl_state
            ) VALUES (
              ${isp_id}, ${c.nome}, ${c.cpf_cnpj ?? null}, ${c.email ?? null}, ${c.telefone ?? null},
              ${c.contrato?.plano?.nome ?? null}, ${c.pppoe_login ?? null}, ${c.onu_serial ?? null},
              ${String(c.id)}, 'hubsoft', ${c.status === 'ativo'},
              ${hasGeo ? `SRID=4326;POINT(${lng} ${lat})` : null},
              ${c.endereco?.logradouro ?? null},
              'IMPLANTADO'::impl_state
            )
          `
          imported++
        }
      } catch (err) {
        errors++
        log.push({ type: 'error', client_id: c.id, msg: err.message })
      }
    }

    if (page >= last_page) break
    page++
  }

  // Record the last successful sync timestamp on the ISP settings.
  await sql`
    UPDATE isp SET
      settings = jsonb_set(settings, '{hubsoft,last_sync}', ${JSON.stringify(new Date().toISOString())}::jsonb)
    WHERE id = ${isp_id}
  `

  // Client positions changed → drop this ISP's cached map tiles.
  await invalidate(`tiles:${isp_id}`)

  return { imported, updated, errors, pages: page, log }
}

// ── Pushes a client's CTO/port assignment back to Hubsoft ────
// Looks up the client (scoped to the ISP), resolves its CTO name and PATCHes
// the ERP record. Throws when the client has no erp_id to patch.
export async function pushPortToHubsoft(isp_id, client_id) {
  const cfg = await getHubsoftConfig(isp_id)

  const [client] = await sql`
    SELECT erp_id, cto_id, cto_port, rx_power
    FROM network_client
    WHERE id = ${client_id} AND isp_id = ${isp_id}
  `
  if (!client?.erp_id) throw new Error('Cliente sem erp_id')

  const [ctoRow] = await sql`SELECT name FROM optical_node WHERE id = ${client.cto_id}`

  const payload = {
    cto_name:  ctoRow?.name ?? null,
    cto_port:  client.cto_port,
    rx_power:  client.rx_power,
  }
  await hubsoftFetch(cfg, `/api/v1/clientes/${client.erp_id}`, {
    method: 'PATCH',
    body: JSON.stringify(payload),
  })

  return { ok: true, erp_id: client.erp_id, cto: ctoRow?.name, port: client.cto_port }
}

// ── Fastify routes ─────────────────────────────────────────────
// Registered under /api/v1/integrations/hubsoft (see src/server.js).
export default async function hubsoftRoutes(app) {

  // Store the Hubsoft OAuth2 credentials in isp.settings.hubsoft (admin only).
  app.post('/config', { preHandler: adm }, async (req, reply) => {
    const { url, client_id, client_secret, username, password } = req.body ?? {}
    if (!url || !client_id || !client_secret || !username || !password)
      return reply.code(400).send({ error: 'Todos os campos são obrigatórios' })

    await sql`
      UPDATE isp SET
        settings = jsonb_set(
          COALESCE(settings, '{}'),
          '{hubsoft}',
          ${JSON.stringify({ url, client_id, client_secret, username, password })}::jsonb
        )
      WHERE id = ${req.isp_id}
    `
    return { ok: true }
  })

  // Connection test: fetch a token and return only a short preview of it.
  app.get('/test', { preHandler: adm }, async (req, reply) => {
    try {
      const cfg = await getHubsoftConfig(req.isp_id)
      const token = await getHubsoftToken(cfg)
      return { ok: true, token_preview: token.slice(0, 12) + '…' }
    } catch (err) {
      return reply.code(502).send({ ok: false, error: err.message })
    }
  })

  // Manual full sync (admin only; runs synchronously within the request).
  app.post('/sync', { preHandler: adm }, async (req) => {
    return syncHubsoftClients(req.isp_id)
  })

  // Integration status summary for the UI.
  app.get('/status', { preHandler: auth }, async (req) => {
    const [isp] = await sql`SELECT settings FROM isp WHERE id = ${req.isp_id}`
    const hub = isp?.settings?.hubsoft ?? {}
    return {
      configured: !!(hub.url && hub.client_id),
      last_sync:  hub.last_sync ?? null,
      url:        hub.url ?? null,
    }
  })

  // Push a client's CTO/port assignment to Hubsoft.
  app.post('/push-port/:client_id', { preHandler: auth }, async (req, reply) => {
    try {
      return await pushPortToHubsoft(req.isp_id, req.params.client_id)
    } catch (err) {
      return reply.code(502).send({ error: err.message })
    }
  })

  // Webhook: receives events pushed by Hubsoft.
  // NOTE(review): no signature/secret validation — any caller who knows an
  // ISP id can trigger a full re-sync or deactivate clients. Confirm whether
  // Hubsoft signs webhook payloads and verify the signature before trusting.
  app.post('/webhook', async (req, reply) => {
    const { event, data } = req.body ?? {}
    const isp_id = req.headers['x-isp-id']

    if (!isp_id) return reply.code(400).send({ error: 'x-isp-id obrigatório' })

    switch (event) {
      case 'cliente.novo':
      case 'cliente.alterado':
        // Re-sync on any client change.
        // TODO: optimize — sync only the affected client id, not all pages.
        await syncHubsoftClients(isp_id)  // otimizar: sync só o id
        break

      case 'os.aberta': {
        // Mirror a newly opened service order into work_order, when the
        // referenced client is known locally.
        const [cl] = await sql`
          SELECT id FROM network_client
          WHERE erp_id = ${String(data.cliente_id)} AND erp_source = 'hubsoft' AND isp_id = ${isp_id}
        `
        if (cl) {
          await sql`
            INSERT INTO work_order (isp_id, client_id, os_type, status, title, erp_id, erp_source)
            VALUES (
              ${isp_id}, ${cl.id},
              'suporte'::os_type, 'aberta'::os_status,
              ${data.titulo ?? 'OS importada do Hubsoft'},
              ${String(data.id)}, 'hubsoft'
            )
          `
        }
        break
      }

      case 'contrato.suspenso': {
        // Suspended contract → mark the client inactive locally.
        await sql`
          UPDATE network_client SET active = false
          WHERE erp_id = ${String(data.cliente_id)} AND erp_source = 'hubsoft' AND isp_id = ${isp_id}
        `
        break
      }
    }

    return { received: true, event }
  })
}
HEREDOC_EOF

# Write the API entry point (quoted heredoc: no shell expansion inside).
mkdir -p "$(dirname "$INSTALL_DIR/src/server.js")"
cat > "$INSTALL_DIR/src/server.js" << 'HEREDOC_EOF'
// src/server.js — LiberMap API v1.0 — completo
// Top-level-await ESM entry point: builds the Fastify app, registers security
// and documentation plugins, mounts all route modules under /api/v1, then
// listens and starts the background scheduler.
import 'dotenv/config'
import Fastify from 'fastify'
import jwt      from '@fastify/jwt'
import cors     from '@fastify/cors'
import helmet   from '@fastify/helmet'
import rateLimit from '@fastify/rate-limit'
import cookie   from '@fastify/cookie'
import swagger  from '@fastify/swagger'
import swaggerUI from '@fastify/swagger-ui'

import { sql }             from './config/db.js'
import licensePlugin       from './plugins/license.js'
import { startScheduler }  from './jobs/scheduler.js'

import authRoutes       from './routes/auth.js'
import userRoutes       from './routes/users.js'
import nodeRoutes       from './routes/nodes.js'
import cableRoutes      from './routes/cables.js'
import clientRoutes     from './routes/clients.js'
import osRoutes         from './routes/work_orders.js'
import tileRoutes       from './routes/tiles.js'
import monitoringRoutes from './routes/monitoring.js'
import hubsoftRoutes    from './routes/integrations/hubsoft.js'

// trustProxy: the installer fronts the API with nginx, so X-Forwarded-* must
// be honoured for req.ip / req.protocol. Pretty logs outside production only.
const app = Fastify({
  logger: {
    level: process.env.LOG_LEVEL || 'info',
    ...(process.env.NODE_ENV !== 'production'
      ? { transport: { target: 'pino-pretty', options: { colorize: true } } }
      : {}),
  },
  trustProxy: true,
  bodyLimit:  5 * 1024 * 1024,
})

// Security & infrastructure plugins.
// NOTE(review): the 'CHANGE-ME' fallbacks for cookie/JWT secrets are a
// footgun if .env ever goes missing — consider refusing to boot in
// production without real secrets (the installer always writes them).
await app.register(helmet, { contentSecurityPolicy: false, crossOriginEmbedderPolicy: false })
await app.register(cors, { origin: process.env.CORS_ORIGIN?.split(',') || true, credentials: true })
await app.register(cookie, { secret: process.env.COOKIE_SECRET || 'CHANGE-ME' })
await app.register(jwt, { secret: process.env.JWT_SECRET || 'CHANGE-ME', sign: { algorithm: 'HS256' } })
// NOTE(review): req.user is populated by jwtVerify in route preHandlers, so
// at rate-limit time it is usually undefined and limiting is per-IP — confirm
// whether per-user limiting was intended here.
await app.register(rateLimit, { global: true, max: 300, timeWindow: '1 minute', keyGenerator: r => r.user?.id || r.ip })

// OpenAPI spec + interactive docs at /docs.
await app.register(swagger, {
  openapi: {
    info: { title: 'LiberMap API', description: 'Documentação de redes FTTH para ISPs', version: '1.0.0' },
    components: { securitySchemes: { bearerAuth: { type: 'http', scheme: 'bearer', bearerFormat: 'JWT' } } },
    security: [{ bearerAuth: [] }],
  },
})
await app.register(swaggerUI, { routePrefix: '/docs' })
await app.register(licensePlugin)

// All business routes live under /api/v1; Hubsoft gets its own sub-prefix.
const API = '/api/v1'
app.register(authRoutes,       { prefix: API })
app.register(userRoutes,       { prefix: API })
app.register(nodeRoutes,       { prefix: API })
app.register(cableRoutes,      { prefix: API })
app.register(clientRoutes,     { prefix: API })
app.register(osRoutes,         { prefix: API })
app.register(tileRoutes,       { prefix: API })
app.register(monitoringRoutes, { prefix: API })
app.register(hubsoftRoutes,    { prefix: `${API}/integrations/hubsoft` })

// Unauthenticated liveness probe (used by the installer and nginx checks).
app.get('/health', async () => ({ status: 'ok', version: '1.0.0', ts: new Date().toISOString() }))

// Fire-and-forget access logging for authenticated requests; the .catch(...)
// intentionally swallows failures so logging can never break a response.
app.addHook('onResponse', async (req, reply) => {
  const isp_id = req.user?.isp_id
  if (!isp_id) return
  sql`INSERT INTO access_log (isp_id,user_id,ip,method,path,status_code,duration_ms)
      VALUES (${isp_id},${req.user?.id??null},${req.ip}::inet,${req.method},${req.url},
              ${reply.statusCode},${Math.round(reply.getResponseTime())})`.catch(()=>{})
})

// Central error handler: schema validation → 400, errors that carry an HTTP
// status → pass-through, everything else → opaque 500 (no stack leakage).
app.setErrorHandler((err, req, reply) => {
  req.log.error({ err }, 'error')
  if (err.validation) return reply.code(400).send({ error: 'Validação falhou', details: err.validation })
  if (err.statusCode)  return reply.code(err.statusCode).send({ error: err.message })
  return reply.code(500).send({ error: 'Erro interno' })
})

// Listen first, then start cron-style background jobs.
const PORT = Number(process.env.PORT) || 3000
try {
  await app.listen({ port: PORT, host: process.env.HOST || '0.0.0.0' })
  app.log.info(`LiberMap API → http://0.0.0.0:${PORT}`)
  startScheduler()
} catch (err) { app.log.error(err); process.exit(1) }

export default app
HEREDOC_EOF

ok "Arquivos instalados"

# ── 5. .env ───────────────────────────────────────────────────
step "Configuração de ambiente"
# Unquoted ENVEOF delimiter: $(date) and the ${...} variables below expand
# NOW, baking the generated secrets into the file.
# NOTE(review): values are written unquoted — a secret containing '#', spaces
# or newlines would break dotenv/systemd parsing; confirm the upstream
# generators only emit safe charsets.
cat > "$INSTALL_DIR/.env" << ENVEOF
# LiberMap — Plataforma Libernet
# Gerado em: $(date)
# Versão: ${VERSION}
NODE_ENV=production
PORT=3000
HOST=0.0.0.0
LOG_LEVEL=info

# Banco de dados
PGHOST=localhost
PGPORT=5432
PGDATABASE=${PG_DB}
PGUSER=${PG_USER}
PGPASSWORD=${PGPASSWORD}

# Redis
REDIS_URL=redis://localhost:6379

# Autenticação
JWT_SECRET=${JWT_SECRET}
COOKIE_SECRET=${COOKIE_SECRET}

# Licença — Plataforma Libernet
LICENSE_SERVER_URL=${LICENSE_SERVER}
LICENSE_KEY=${LICENSE_KEY}
PUBLIC_DOMAIN=${PUBLIC_DOMAIN}

# Monitoramento
MONITOR_SECRET_KEY=${MONITOR_KEY}
MAPBOX_TOKEN=
ENVEOF
chmod 600 "$INSTALL_DIR/.env"

# Record installed version + update endpoint for update.sh.
echo "$VERSION" > "$INSTALL_DIR/.version"
echo "$BASE_URL" > "$INSTALL_DIR/.update_url"
ok ".env gerado"

# ── 6. Migrations ─────────────────────────────────────────────
step "Banco de dados"
# Apply idempotent migrations. -v ON_ERROR_STOP=1 makes psql return non-zero
# on SQL errors (by default psql exits 0 even when statements fail, so the
# old `&& ok || warn` never actually detected failures). stderr goes to a log
# file instead of /dev/null so genuine failures can be diagnosed.
mkdir -p "$INSTALL_DIR/logs"
run_migration() {
  local file="$1" label="$2"
  if PGPASSWORD="$PGPASSWORD" psql -v ON_ERROR_STOP=1 -U "$PG_USER" -d "$PG_DB" \
      -f "$file" >>"$INSTALL_DIR/logs/migrations.log" 2>&1; then
    ok "Migration $label aplicada"
  else
    warn "Migration $label já aplicada ou com aviso (ver $INSTALL_DIR/logs/migrations.log)"
  fi
}
run_migration "$INSTALL_DIR/sql/migrations/001_base.sql" "001"
run_migration "$INSTALL_DIR/sql/migrations/002_permissions_licenses.sql" "002"

# ── 7. npm install ────────────────────────────────────────────
step "Dependências Node.js"
# `cd X && npm ...` would silently skip the install if cd failed (a failing
# command before && is exempt from set -e) — fail loudly instead.
cd "$INSTALL_DIR" || err "Diretório $INSTALL_DIR não encontrado"
# --omit=dev is the npm 8+ replacement for the deprecated --production flag.
npm install --omit=dev --silent
ok "Dependências instaladas"

# ── 8. ISP e admin ────────────────────────────────────────────
step "Dados iniciais"
if [[ "$REINSTALL" == "false" ]]; then
  # Hash the admin password with bcrypt. The password is handed over via the
  # environment, NOT interpolated into the JS source — a quote or backslash
  # in the password would otherwise break (or inject into) the -e script.
  ADMIN_HASH=$(ADMIN_PASS="$ADMIN_PASS" node -e "
const b=require('bcrypt');
b.hash(process.env.ADMIN_PASS,12).then(h=>process.stdout.write(h));
" 2>/dev/null)
  # Never insert an empty hash (node/bcrypt failure was previously silent).
  [[ -n "$ADMIN_HASH" ]] || err "Falha ao gerar hash da senha do administrador"

  # Escape single quotes for SQL string literals ('' is the SQL escape for ')
  # — an ISP name like "Zé's Net" would otherwise break/inject the statement.
  # ISP_SLUG is already sanitized to [alnum-] at prompt time.
  SQL_ISP_NAME=${ISP_NAME//\'/\'\'}
  SQL_ADMIN_EMAIL=${ADMIN_EMAIL//\'/\'\'}

  PGPASSWORD="$PGPASSWORD" psql -U "$PG_USER" -d "$PG_DB" << SQLEOF 2>/dev/null
INSERT INTO isp (slug, name, plan, settings)
VALUES (
  '${ISP_SLUG}', '${SQL_ISP_NAME}', 'basic',
  '{"license": {"status": "active", "key": "${LICENSE_KEY}"}}'::jsonb
) ON CONFLICT (slug) DO NOTHING;

INSERT INTO app_user (isp_id, name, email, password_hash, role)
SELECT id, 'Administrador', '${SQL_ADMIN_EMAIL}', '${ADMIN_HASH}', 'admin'
FROM isp WHERE slug = '${ISP_SLUG}'
ON CONFLICT DO NOTHING;
SQLEOF
  ok "ISP '${ISP_NAME}' e admin '${ADMIN_EMAIL}' criados"
else
  ok "Reinstalação — dados preservados"
fi

# ── 9. Permissões ─────────────────────────────────────────────
# Hand the whole tree to the service user; re-tighten .env afterwards so it
# stays owner-only even if anything above loosened it.
chown -R "$APP_USER:$APP_USER" "$INSTALL_DIR"
chmod 600 "$INSTALL_DIR/.env"

# ── 10. update.sh local ───────────────────────────────────────
# Drops the self-update script onto the ISP's server. Quoted heredoc
# delimiter: everything below reaches the file literally (no expansion here).
cat > "$INSTALL_DIR/update.sh" << 'UPDATEEOF'
#!/usr/bin/env bash
# LiberMap — Atualizador
# Baixa e aplica a versão mais recente disponível
# Uso: sudo bash /opt/libermap/update.sh
set -euo pipefail

RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'
CYAN='\033[0;36m'; BOLD='\033[1m'; NC='\033[0m'
ok(){  echo -e "${GREEN}[ OK ]${NC} $*"; }
info(){echo -e "${CYAN}[INFO]${NC} $*"; }
warn(){echo -e "${YELLOW}[WARN]${NC} $*"; }
err(){ echo -e "${RED}[ERRO]${NC} $*"; exit 1; }

[[ $EUID -ne 0 ]] && err "Execute como root: sudo bash /opt/libermap/update.sh"

INSTALL_DIR="/opt/libermap"
BASE_URL=$(cat "$INSTALL_DIR/.update_url" 2>/dev/null || echo "https://saas.libernet.com.br/instalador/libermap")
CURRENT=$(cat "$INSTALL_DIR/.version" 2>/dev/null || echo "0.0.0")

echo -e "${BOLD}${CYAN}"
echo "  LiberMap — Atualizador"
echo -e "${NC}"
info "Versão instalada: $CURRENT"
info "Verificando atualizações em $BASE_URL..."

# Busca versão disponível
VERSION_JSON=$(curl -fsSL "$BASE_URL/version.json" 2>/dev/null) || err "Não foi possível conectar ao servidor de atualizações"
LATEST=$(echo "$VERSION_JSON" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d['version'])")
CHANGELOG=$(echo "$VERSION_JSON" | python3 -c "import sys,json; d=json.load(sys.stdin); [print('  •', c) for c in d['changelog']]")
# NOTE: MIGRATIONS is parsed but not consumed here — the remote update script
# is expected to apply migrations itself.
MIGRATIONS=$(echo "$VERSION_JSON" | python3 -c "import sys,json; d=json.load(sys.stdin); [print(m) for m in d.get('migrations',[])]")
UPDATE_SCRIPT=$(echo "$VERSION_JSON" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d['update_script'])")

if [[ "$LATEST" == "$CURRENT" ]]; then
  echo ""
  ok "LiberMap já está na versão mais recente ($CURRENT)"
  exit 0
fi

echo ""
echo -e "${BOLD}Nova versão disponível: ${CYAN}v$LATEST${NC} (atual: v$CURRENT)"
echo ""
echo "Novidades:"
echo "$CHANGELOG"
echo ""
read -rp "Confirmar atualização para v$LATEST? (s/N): " CONFIRM
[[ "$CONFIRM" =~ ^[Ss]$ ]] || { echo "Cancelado."; exit 0; }

# Baixa e executa o update.sh do servidor
TMP=$(mktemp -d)
# Single-quoted trap: "$TMP" is expanded when the trap fires, and the quoted
# expansion plus `--` survive any path the shell hands back from mktemp.
trap 'rm -rf -- "$TMP"' EXIT
curl -fsSL "$UPDATE_SCRIPT" -o "$TMP/update-remote.sh"
chmod +x "$TMP/update-remote.sh"
bash "$TMP/update-remote.sh" "$LATEST"
UPDATEEOF
chmod +x "$INSTALL_DIR/update.sh"
chown "$APP_USER:$APP_USER" "$INSTALL_DIR/update.sh"
ok "Script de atualização instalado em $INSTALL_DIR/update.sh"

# ── 11. Systemd ───────────────────────────────────────────────
step "Serviço systemd"
# StandardOutput/StandardError append into $INSTALL_DIR/logs — the directory
# must exist before first start or the unit fails to launch.
mkdir -p "${INSTALL_DIR}/logs"
chown "$APP_USER:$APP_USER" "${INSTALL_DIR}/logs"
# Both redis.service and redis-server.service are listed because Debian and
# Ubuntu name the unit redis-server; a missing unit in Wants= is ignored.
cat > /etc/systemd/system/libermap.service << SVCEOF
[Unit]
Description=LiberMap — Plataforma Libernet
Documentation=https://saas.libernet.com.br
After=network.target postgresql.service redis.service redis-server.service
Wants=postgresql.service redis.service redis-server.service

[Service]
Type=simple
User=${APP_USER}
WorkingDirectory=${INSTALL_DIR}
EnvironmentFile=${INSTALL_DIR}/.env
ExecStart=/usr/bin/node src/server.js
Restart=always
RestartSec=5
StandardOutput=append:${INSTALL_DIR}/logs/api.log
StandardError=append:${INSTALL_DIR}/logs/api-error.log
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
SVCEOF
systemctl daemon-reload && systemctl enable libermap
ok "Serviço libermap configurado"

# ── 12. Nginx ─────────────────────────────────────────────────
step "Nginx"
# Cache dir for the tile micro-cache. `proxy_cache_valid` is a no-op without
# a proxy_cache zone, so a small keys_zone is declared in the vhost below.
mkdir -p /var/cache/nginx/libermap
chown www-data:www-data /var/cache/nginx/libermap 2>/dev/null || true
cat > /etc/nginx/sites-available/libermap << NGINXEOF
# Included at http{} level, so proxy_cache_path is valid here.
proxy_cache_path /var/cache/nginx/libermap levels=1:2 keys_zone=libermap_tiles:10m
                 max_size=256m inactive=10m use_temp_path=off;
upstream libermap_api { server 127.0.0.1:3000; keepalive 32; }
server {
    listen 80;
    server_name ${PUBLIC_DOMAIN};
    client_max_body_size 10M;
    location / {
        proxy_pass         http://libermap_api;
        proxy_http_version 1.1;
        proxy_set_header   Upgrade           \$http_upgrade;
        proxy_set_header   Connection        "upgrade";
        proxy_set_header   Host              \$host;
        proxy_set_header   X-Real-IP         \$remote_addr;
        proxy_set_header   X-Forwarded-For   \$proxy_add_x_forwarded_for;
        proxy_set_header   X-Forwarded-Proto \$scheme;
        proxy_read_timeout 120s;
    }
    # Tiles MVT — 30 s micro-cache to absorb map-pan bursts.
    # The Authorization header is part of the cache key so cached tiles are
    # never served across different users/tenants.
    location ~ ^/api/v1/geo/tiles/ {
        proxy_pass         http://libermap_api;
        proxy_http_version 1.1;
        proxy_set_header   Host              \$host;
        proxy_set_header   X-Real-IP         \$remote_addr;
        proxy_set_header   X-Forwarded-For   \$proxy_add_x_forwarded_for;
        proxy_cache        libermap_tiles;
        proxy_cache_key    \$scheme\$proxy_host\$request_uri\$http_authorization;
        proxy_cache_valid  200 30s;
        add_header         X-Cache-Status \$upstream_cache_status;
    }
}
NGINXEOF
ln -sf /etc/nginx/sites-available/libermap /etc/nginx/sites-enabled/libermap
rm -f /etc/nginx/sites-enabled/default 2>/dev/null || true
# Don't silently skip the reload (the old `nginx -t && reload` then
# unconditional ok hid broken configs).
if nginx -t >/dev/null 2>&1; then
  systemctl reload nginx
  ok "Nginx configurado"
else
  warn "nginx -t falhou — revise /etc/nginx/sites-available/libermap (nginx não recarregado)"
fi

# ── 13. SSL ───────────────────────────────────────────────────
# Only attempt Let's Encrypt when an e-mail was given and the domain looks
# like a real FQDN: contains a dot, is not "localhost" and is not a bare
# IPv4 address (certbot cannot issue certificates for IPs).
if [[ -n "$LE_EMAIL" && "$PUBLIC_DOMAIN" =~ \. ]] && \
   [[ "$PUBLIC_DOMAIN" != "localhost" ]] && \
   ! [[ "$PUBLIC_DOMAIN" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
  step "SSL Let's Encrypt"
  # NOTE(review): certbot's stderr is discarded; rerun without 2>/dev/null
  # when debugging issuance failures.
  certbot --nginx -d "$PUBLIC_DOMAIN" --email "$LE_EMAIL" \
    --agree-tos --non-interactive 2>/dev/null \
    && ok "SSL configurado" \
    || warn "SSL falhou — configure manualmente: sudo certbot --nginx"
fi

# ── 14. Firewall + Backup ─────────────────────────────────────
# Best-effort UFW setup: only when ufw is installed; every rule is tolerant
# of already-existing state.
if command -v ufw &>/dev/null; then
  ufw allow 22/tcp >/dev/null 2>&1 || true
  ufw allow 80/tcp >/dev/null 2>&1 || true
  ufw allow 443/tcp >/dev/null 2>&1 || true
  ufw --force enable >/dev/null 2>&1 || true
  ok "Firewall UFW configurado"
fi

# The nightly dump lands in $INSTALL_DIR/backups — create it up front so the
# first cron run doesn't fail writing its gzip.
mkdir -p "$INSTALL_DIR/backups"
chown "$APP_USER:$APP_USER" "$INSTALL_DIR/backups"
cat > /etc/cron.d/libermap-backup << CRONEOF
0 2 * * * ${APP_USER} PGPASSWORD=${PGPASSWORD} pg_dump -U ${PG_USER} ${PG_DB} | gzip > ${INSTALL_DIR}/backups/libermap_\$(date +\%Y\%m\%d).sql.gz && find ${INSTALL_DIR}/backups -name "*.gz" -mtime +30 -delete
CRONEOF
# The cron file embeds the DB password in clear text — default 644 would let
# any local user read it; cron only needs root to read /etc/cron.d entries.
chmod 600 /etc/cron.d/libermap-backup
ok "Backup diário configurado (02:00)"

# ── 15. Iniciar ───────────────────────────────────────────────
# Start the service, give it a moment, then verify both the unit state and
# the HTTP health endpoint. A dead unit aborts the installer; a slow health
# endpoint only warns.
step "Iniciando LiberMap"
systemctl start libermap
sleep 4
if systemctl is-active --quiet libermap; then
  ok "LiberMap rodando!"
else
  err "Falha — verifique: journalctl -u libermap -n 50"
fi

sleep 2
if curl -sf http://localhost:3000/health >/dev/null 2>&1; then
  ok "Health check OK"
else
  warn "Health ainda não respondeu — aguarde alguns segundos"
fi

# ── Resumo ────────────────────────────────────────────────────
# Primary IPv4 of the host. NOTE(review): IP is computed but never referenced
# in the output below — either print it alongside the URLs or drop it.
IP=$(hostname -I | awk '{print $1}')
echo ""
echo -e "${BOLD}${GREEN}══════════════════════════════════════════════════════════${NC}"
echo -e "${BOLD}${GREEN}  LiberMap v${VERSION} instalado com sucesso!${NC}"
echo -e "${BOLD}${GREEN}  Plataforma Libernet — saas.libernet.com.br${NC}"
echo -e "${BOLD}${GREEN}══════════════════════════════════════════════════════════${NC}"
echo ""
echo -e "  API:        ${CYAN}http://${PUBLIC_DOMAIN}/api/v1${NC}"
echo -e "  Docs:       ${CYAN}http://${PUBLIC_DOMAIN}/docs${NC}"
echo -e "  Health:     ${CYAN}http://${PUBLIC_DOMAIN}/health${NC}"
echo ""
echo -e "${BOLD}ISP:${NC}        ${ISP_NAME} (${ISP_SLUG})"
echo -e "${BOLD}Admin:${NC}      ${ADMIN_EMAIL}"
echo -e "${BOLD}Licença:${NC}    ${LICENSE_KEY}"
echo ""
echo -e "${BOLD}Atualizar:${NC}"
echo -e "  ${CYAN}sudo bash ${INSTALL_DIR}/update.sh${NC}"
echo ""
echo -e "${BOLD}Outros comandos:${NC}"
echo -e "  Status:    ${CYAN}systemctl status libermap${NC}"
echo -e "  Logs:      ${CYAN}journalctl -u libermap -f${NC}"
echo -e "  Restart:   ${CYAN}systemctl restart libermap${NC}"
echo ""
echo -e "${YELLOW}⚠  Guarde as credenciais do resumo abaixo!${NC}"
echo ""

# Salva resumo
# Plaintext credential dump (DB password, JWT secret, monitor key) — kept
# root/owner readable only via the chmod 600 below. Unquoted SUMEOF: the
# ${...} values expand at generation time.
cat > "$INSTALL_DIR/INSTALL_SUMMARY.txt" << SUMEOF
LiberMap — Plataforma Libernet
Instalado em: $(date)
Versão: ${VERSION}
ISP: ${ISP_NAME} (${ISP_SLUG})
Admin: ${ADMIN_EMAIL}
Domínio: ${PUBLIC_DOMAIN}
License Server: ${LICENSE_SERVER}
License Key: ${LICENSE_KEY}
Banco: ${PG_DB} / ${PG_USER} / ${PGPASSWORD}
JWT Secret: ${JWT_SECRET}
Monitor Key: ${MONITOR_KEY}
Atualizar: sudo bash ${INSTALL_DIR}/update.sh
SUMEOF
chmod 600 "$INSTALL_DIR/INSTALL_SUMMARY.txt"
chown "$APP_USER:$APP_USER" "$INSTALL_DIR/INSTALL_SUMMARY.txt"
info "Resumo salvo em: $INSTALL_DIR/INSTALL_SUMMARY.txt"
