Coach and chatbot fixes
This commit is contained in:
parent f1ffd1ad67
commit 00cedaf0dc
@@ -1 +1 @@
c5704005f291ecf264cdc92403119e7db5831e61-372bcf506971f56c4911b429b9f5de5bc37ed008-e9eccd451b778829eb2f2c9752c670b707e1268b
960faad1696b81c0f004065ea713edaef64ab816-372bcf506971f56c4911b429b9f5de5bc37ed008-e9eccd451b778829eb2f2c9752c670b707e1268b
@@ -1744,6 +1744,13 @@ app.post('/api/chat/threads', authenticateUser, async (req, res) => {
await pool.query(
'INSERT INTO ai_chat_threads (id,user_id,bot_type,title) VALUES (?,?, "support", ?)',
[id, userId, title]
);
// Seed a first assistant message so the drawer never appears blank
const intro =
'Hi — Aptiva Support here. I can help with CareerExplorer, account/billing, or technical issues. What do you need?';
await pool.query(
'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, "assistant", ?)',
[id, userId, intro]
);
res.json({ id, title });
});
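// Illustrative note (assumption, not part of the commit): with this change a POST
// to /api/chat/threads still returns { id, title }, but the freshly created support
// thread already contains the seeded assistant intro, so a follow-up GET of the
// thread shows at least one message instead of an empty drawer.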
@@ -1347,39 +1347,43 @@ app.delete('/api/premium/career-profile/:careerProfileId', authenticatePremiumUs
}
});

app.post('/api/premium/ai/chat', authenticatePremiumUser, chatGate('coach'), async (req, res) => {
try {
const {
userProfile = {},
scenarioRow = {},
financialProfile = {},
collegeProfile = {},
chatHistory = [],
forceContext = false
} = req.body;

let existingTitles = [];
let miniGrid = "-none-"; // slim grid

app.post(
'/api/premium/ai/chat',
authenticatePremiumUser,
chatGate('coach'),
async (req, res) => {
try {
const [rows] = await pool.query(
const {
userProfile = {},
scenarioRow = {},
financialProfile = {},
collegeProfile = {},
chatHistory = [],
forceContext = false,
} = req.body;

`SELECT id, DATE_FORMAT(date,'%Y-%m-%d') AS d, title
FROM milestones
WHERE user_id = ? AND career_profile_id = ?`,
[req.id, scenarioRow.id]
);
let existingTitles = [];
let miniGrid = '-none-'; // slim grid for current milestones

existingTitles = rows.map(r => `${r.title.trim()}|${r.d}`);
// Fetch existing milestones for this user + scenario
try {
const [rows] = await pool.query(
`SELECT id, DATE_FORMAT(date,'%Y-%m-%d') AS d, title
FROM milestones
WHERE user_id = ? AND career_profile_id = ?`,
[req.id, scenarioRow.id]
);

if (rows.length) {
miniGrid = rows
.map(r => `${r.id}|${r.title.trim()}|${r.d}`)
.join("\n");
existingTitles = rows.map((r) => `${r.title.trim()}|${r.d}`);

if (rows.length) {
miniGrid = rows
.map((r) => `${r.id}|${r.title.trim()}|${r.d}`)
.join('\n');
}
} catch (e) {
console.error('Could not fetch existing milestones ⇒', e);
}
} catch (e) {
console.error("Could not fetch existing milestones ⇒", e);
}

// ------------------------------------------------
// 1. Helper Functions
@@ -1433,7 +1437,7 @@ app.post('/api/premium/ai/chat', authenticatePremiumUser, chatGate('co

// Friendly note - feel free to tweak the wording
const friendlyNote = `
Feel free to use AptivaAI however it best suits you—there’s no "wrong" answer.
Feel free to use AptivaAI however it best suits you—there’s no "wrong" answer but Coach is designed to provide you with actionable steps towards your goals.
It doesn’t matter so much where you've been; it's about where you want to go from here.
We can refine details any time or jump straight to what you’re most eager to explore right now.

@@ -1445,6 +1449,22 @@ I'm here to support you with personalized coaching—what would you like to focu
return `${combinedDescription}\n\n${friendlyNote}`;
}

// One‑shot interview helpers
const getLastAssistant = (hist) => {
if (!Array.isArray(hist)) return null;
for (let i = hist.length - 1; i >= 0; i--) {
const m = hist[i];
if (m && m.role === 'assistant' && typeof m.content === 'string') return m;
}
return null;
};
const stripTags = (s='') => s.replace(/<[^>]+>/g, '');
const lastAssistant = getLastAssistant(chatHistory);
const lastAssistantText = stripTags(lastAssistant?.content || '');
const lastAssistantIsOneShotQ =
/<!--\s*interview-oneshot-q\s*-->/.test(lastAssistant?.content || '') ||
/^Interview question\s*:/i.test(lastAssistantText);

// B. Build a user summary that references all available info (unchanged from your code)
function buildUserSummary({
userProfile = {},
@@ -1633,7 +1653,6 @@ ${econText}
// ------------------------------------------------
// 2. AI Risk Fetch
// ------------------------------------------------
const apiBase = process.env.APTIVA_INTERNAL_API || "http://localhost:5002/api";
let aiRisk = null;
try {
const aiRiskRes = await auth(
@@ -1713,8 +1732,10 @@ Our mission is to help people grow *with* AI rather than be displaced by it.
Speak in a warm, encouraging tone, but prioritize *specific next steps* over generic motivation.
Validate ambitions, break big goals into realistic milestones, and show how AI can be a collaborator.

Finish every reply with **one concrete suggestion or question** that moves the plan forward.
Finish every reply with **one concise follow‑up** that stays on the **same topic as the user’s last message**.
Do **not** propose or ask about roadmaps/milestones/interviews unless the user **explicitly asked** (or pressed a quick‑action button).
Never ask for info you already have unless you truly need clarification.

`.trim();

@@ -1800,6 +1821,17 @@ ${combinedStatusSituation}
${summaryText}
`.trim();

const systemPromptDirectQA = `
Answer the user's **last message directly and specifically**.

Rules:
• Start with the direct answer in 1–3 short paragraphs or a clear bullet list.
• Stay on the user’s topic; do **not** propose or ask about roadmaps, milestones, or interviews unless the user explicitly asked in the last turn.
• Use the user's region (${userProfile?.area || userProfile?.state || 'their region'}) when location matters.
• If key data is missing, say what you need and ask **one** precise follow‑up.
• Avoid repeating the same sentence you used previously (e.g., “I can create a roadmap…”).`.trim();

const dynMilestonePrompt = `
[CURRENT MILESTONES]
Use **exactly** the UUID at the start of each line when you refer to a milestone
@@ -1868,59 +1900,206 @@ const systemPromptDateGuard = `
Every milestone “date” must be **on or after** ${todayISO}.
If you’re asked for short-term dates, they still must be ≥ ${todayISO}.
Reject or re-ask if the user insists on a past date.
`.trim();
`.trim();

const avoidBlock = existingTitles.length
? "\nAVOID any milestone whose title matches REGEXP /" +
existingTitles.map(t => `(?:${t.split("|")[0].replace(/[.*+?^${}()|[\]\\]/g,"\\$&")})`)
.join("|") + "/i"
: "";
const avoidBlock =
existingTitles.length
? `\nAVOID any milestone whose title matches REGEXP /${existingTitles
.map((t) =>
`(?:${t.split('|')[0].replace(/[.*+?^${}()|[\]\\]/g, '\\$&')})`
)
.join('|')}/i`
: '';
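// Illustrative sketch (hypothetical titles, not from the commit): given
// existingTitles = ['Update resume|2025-01-15', 'Earn PMP certification|2025-06-01'],
// the template above escapes the title part before each '|', wraps it in a
// non-capturing group, and OR-joins the groups into one case-insensitive regex:
//   "\nAVOID any milestone whose title matches REGEXP /(?:Update resume)|(?:Earn PMP certification)/i"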

const MAX_TURNS = 20;
const messagesToSend = [];

const recentHistory = chatHistory.slice(-MAX_CHAT_TURNS);
// Intro card only at conversation start (lightweight)
if ((chatHistory?.length || 0) < 2) {
messagesToSend.push({ role: "system", content: systemPromptIntro });
}

const firstTurn = chatHistory.length === 0;
// Always include detailed context at start or when explicitly requested
if ((chatHistory?.length || 0) < 2 || forceContext) {
messagesToSend.push({ role: "system", content: systemPromptDetailedContext });
}

const STATIC_SYSTEM_CARD = `
${systemPromptIntro}
// Per-turn, always small helpers
messagesToSend.push(
{ role: "system", content: systemPromptStatusSituation },
);

${systemPromptOpsCheatSheet}
// Is the latest turn a Quick Action (we inject a transient MODE:... system card)?
const lastTurn = Array.isArray(chatHistory) ? chatHistory[chatHistory.length - 1] : null;

/* Milestone JSON spec, date guard, and avoid-list */
${systemPromptMilestoneFormat}
${systemPromptDateGuard}
${avoidBlock}
`.trim();
// Find the last user message content (plain string)
const lastUserMsg = (() => {
if (!Array.isArray(chatHistory)) return '';
for (let i = chatHistory.length - 1; i >= 0; i--) {
const m = chatHistory[i];
if (m && m.role === 'user' && typeof m.content === 'string') {
return m.content.trim();
}
}
return '';
})();

const NEEDS_OPS_CARD = !chatHistory.some(
m => m.role === "system" && m.content.includes("APTIVA OPS CHEAT-SHEET")
);
// Very small intent heuristic for "info questions" (salary, duties, etc.)
const INFO_Q_RX = /\b(salary|pay|compensation|starting\s*salary|median|range|job\s*description|dut(y|ies)|requirements?|skills|responsibilit(y|ies)|what\s+is|how\s+much|how\s+do)\b/i;
const isInfoQuestion = !!lastUserMsg && INFO_Q_RX.test(lastUserMsg);

const NEEDS_CTX_CARD = !chatHistory.some(
m => m.role === "system" && m.content.startsWith("[DETAILED USER PROFILE]")
);
// —— Interview state helpers ——

const SEND_CTX_CARD = forceContext || NEEDS_CTX_CARD;

const messagesToSend = [];

// ① Large, unchanging card – once per conversation
if (NEEDS_OPS_CARD) {
messagesToSend.push({ role: "system", content: STATIC_SYSTEM_CARD });
// Find most recent "Starting mock interview" assistant note (persisted by frontend)
function findInterviewStartIdx(history) {
const h = Array.isArray(history) ? history : [];
for (let i = h.length - 1; i >= 0; i--) {
const m = h[i];
if (m.role === 'assistant' && /Starting mock interview/i.test(m.content || '')) {
return i;
}
}
return -1;
}

if (NEEDS_CTX_CARD || SEND_CTX_CARD)
messagesToSend.push({ role:"system", content: summaryText });
// Count questions asked and capture the last asked text
function extractInterviewProgress(history) {
const asked = [];
for (const m of history) {
if (m.role !== 'assistant' || typeof m.content !== 'string') continue;
const t = m.content.replace(/<[^>]+>/g, ''); // strip tags
// Accept several formats: "Next question:", "**Question 3:**", "Question 3:"
const rx = /(?:^|\n)\s*(?:Next question:|\*{0,2}\s*Question\s+\d+\s*:)\s*(.+)/gi;
let match;
while ((match = rx.exec(t))) asked.push(match[1].trim());
}
const lastAskedText = asked.length ? asked[asked.length - 1] : '';
return { askedCount: asked.length, lastAskedText };
}
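// Illustrative sketch (hypothetical history, not from the commit):
//   extractInterviewProgress([
//     { role: 'assistant', content: 'Next question: Tell me about a conflict you resolved.' }
//   ])
//   // -> { askedCount: 1, lastAskedText: 'Tell me about a conflict you resolved.' }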

// ② Per-turn contextual helpers (small!)
messagesToSend.push(
{ role: "system", content: systemPromptStatusSituation },
{ role: "system", content: dynMilestonePrompt } // <-- grid replaces two old lines
function wasInterviewJustEnded(history) {
return (history || []).some(
(m) => m.role === 'assistant' && /<!--\s*interview-complete\s*-->/i.test(m.content || '')
);
}

// Last assistant message (to prevent repeating the pitch)
const lastAssistantMsg = (() => {
if (!Array.isArray(chatHistory)) return '';
for (let i = chatHistory.length - 1; i >= 0; i--) {
const m = chatHistory[i];
if (m && m.role === 'assistant' && typeof m.content === 'string') {
return m.content.trim();
}
}
return '';
})();
const ASSISTANT_PITCHED_PLAN = /roadmap|milestone|plan\b/i.test(lastAssistantMsg);

// Parse MODE on the last transient system card (if any)
const modeMatch =
lastTurn && lastTurn.role === 'system'
? /MODE\s*:\s*([A-Za-z_]+)/i.exec(lastTurn.content || '')
: null;
const MODE = (modeMatch ? modeMatch[1] : '').toLowerCase();

// One‑shot *start* (ask exactly one question)
const IS_ONESHOT_START = MODE === 'interview_one_shot';
// One‑shot *evaluation* (user replied to last one‑shot question)
const IS_ONESHOT_EVAL = !IS_ONESHOT_START && lastAssistantIsOneShotQ;

const PLAN_MODES = new Set(['networking_plan', 'job_search_plan', 'ai_growth']);

const IS_PLAN_TURN = PLAN_MODES.has(MODE);
const IS_INTERVIEW_TURN = MODE === 'interview';

// Detect an ongoing interview even if the MODE card wasn’t sent this turn
const recent = (chatHistory || []).slice(-20);
const hasRecentInterviewCard = recent.some(
(m) => m.role === 'system' && /MODE\s*:\s*interview/i.test(m.content || '')
);
const lastAssistantRecent = [...recent].reverse().find((m) => m.role === 'assistant');
const assistantLooksInterview =
lastAssistantRecent &&
/(Next question:|^\s*\*{0,2}Question\s+\d+\s*:|Starting mock interview)/i.test(
(lastAssistantRecent.content || '').replace(/<[^>]+>/g, '')
);
const interviewEndedRecently = wasInterviewJustEnded(chatHistory);

// Active session?
const IN_INTERVIEW_SESSION = (IS_INTERVIEW_TURN || hasRecentInterviewCard || assistantLooksInterview) && !interviewEndedRecently;

// Interview progress + next index
const interviewStartIdx = findInterviewStartIdx(chatHistory);
const { askedCount: interviewQCount, lastAskedText } = extractInterviewProgress(chatHistory);
const nextQNumber = Math.min(5, interviewQCount + 1);
const userQuitInterview = /\b(quit|end|stop)\s+interview\b/i.test(lastUserMsg || '');
const INTERVIEW_COMPLETE = IN_INTERVIEW_SESSION && (interviewQCount >= 5 || userQuitInterview);

// Build the model window: either the interview segment, or the last MAX_TURNS

const INTERVIEW_SEGMENT_CAP = 100; // ~ ample for 5 Q/A pairs
const windowForModel = (() => {
if (IN_INTERVIEW_SESSION && interviewStartIdx !== -1) {
const start = Math.max(interviewStartIdx, chatHistory.length - INTERVIEW_SEGMENT_CAP);
return chatHistory.slice(start);
}
return (chatHistory || []).slice(-MAX_TURNS);
})();
messagesToSend.push(...windowForModel);
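// Note on the windowing above (descriptive, not part of the commit): during an
// active interview the model window starts at the persisted "Starting mock
// interview" note (capped to the last INTERVIEW_SEGMENT_CAP = 100 messages) so
// every Q/A pair stays visible; otherwise only the last MAX_TURNS = 20 entries
// of chatHistory are forwarded to the model.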

// Guards by mode
let willAppendInterviewCompleteTag = false;

if (IS_ONESHOT_START) {
// Hard guard: output *exactly* one question + sentinel comment
const oneShotAsk = `
You are generating one interview question only.
Output EXACTLY:
Interview question: <one specific question>
<!-- interview-oneshot-q -->
No other text, no scoring, no numbering, no follow‑ups.
`.trim();
messagesToSend.push({ role: 'system', content: oneShotAsk });

} else if (IS_ONESHOT_EVAL) {
// Evaluate the user's last message against the immediately preceding question
const oneShotEval = `
Evaluate the user's most recent message as an answer to the immediately preceding interview question.
Write exactly these four lines (plain text, no markdown):
Score: X/5
Why: <1–2 sentences>
Strength: <short phrase>
Improve: <one targeted suggestion>
Do not ask another question. Do not propose roadmaps or milestones.
<!-- interview-oneshot-done -->
`.trim();
messagesToSend.push({ role: 'system', content: oneShotEval });

} else if (IS_PLAN_TURN) {

messagesToSend.push(
{ role: "system", content: dynMilestonePrompt },
{ role: "system", content: `${systemPromptMilestoneFormat}\n${systemPromptDateGuard}${avoidBlock}` }
);

} else {
// Normal chat guard (your existing one)
const systemPromptNormalReplyGuard = `
You are in NORMAL CHAT mode (not plan creation).
1) Answer the user's latest message directly in the first paragraph.
2) Do NOT suggest creating a roadmap or milestones unless the user explicitly asks.
3) Do NOT repeat the same offer or question from your previous reply.
4) Keep it concise; add one helpful next question only if needed.
${ASSISTANT_PITCHED_PLAN ? `5) Your last reply mentioned a roadmap/milestones — do not repeat that. Answer the user’s question directly.` : ``}
${isInfoQuestion ? `6) The user asked an information question ("${lastUserMsg.slice(0,160)}"). Provide the requested facts. If you lack exact data, say so and ask ONE precise follow-up.` : ``}
`.trim();
messagesToSend.push({ role: "system", content: systemPromptNormalReplyGuard });
}

// Recent conversational context (trimmed)
messagesToSend.push(...(chatHistory || []).slice(-MAX_TURNS));

// ③ Recent conversational context
messagesToSend.push(...chatHistory.slice(-MAX_CHAT_TURNS));

// ------------------------------------------------
// 6. Call GPT (unchanged)
@@ -1941,14 +2120,31 @@ messagesToSend.push(...chatHistory.slice(-MAX_CHAT_TURNS));
reply: "Sorry, I didn't get a response. Could you please try again?"
});
}

/* 🔹 NEW: detect fenced ```ops``` JSON */
let opsConfirmations = [];
// Prepare containers BEFORE we do any parsing so TDZ can't bite us
let createdMilestonesData = [];
let opsConfirmations = [];
/* 🔹 detect fenced ```ops``` JSON */
const opsMatch = rawReply.match(/```ops\s*([\s\S]*?)```/i);
if (opsMatch) {
try {
const opsObj = JSON.parse(opsMatch[1]);
opsConfirmations = await applyOps(opsObj, req);

// NEW: if there were CREATE ops, reflect that back to the client
if (Array.isArray(opsObj?.milestones)) {
const createdOps = opsObj.milestones.filter(
(m) => String(m.op || '').toUpperCase() === 'CREATE'
);
if (createdOps.length) {
createdMilestonesData.push(
...createdOps.map((m) => ({
milestoneId: null,
title: m?.data?.title || ''
}))
);
}
}

} catch (e) {
console.error("Could not parse ops JSON:", e);
}
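// Illustrative sketch (hypothetical payload, not from the commit): a model reply
// containing a fenced ops block that the regex above would capture might look like
//   ```ops
//   { "milestones": [ { "op": "CREATE", "data": { "title": "Draft resume" } } ] }
//   ```
// JSON.parse(opsMatch[1]) then yields the object handed to applyOps(), and each
// CREATE op is echoed back to the client via createdMilestonesData.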

@@ -1958,6 +2154,16 @@ if (opsMatch) {
let visibleReply = rawReply.replace(/```ops[\s\S]*?```/i, "").trim();
if (!visibleReply) visibleReply = "Done!";

// Normalize interview labels if the model tried to enumerate
visibleReply = visibleReply
.replace(/Here'?s your first question\s*:\s*/ig, 'Interview question: ')
.replace(/\*\*?\s*Question\s*\d+\s*:\s*/ig, 'Interview question: ');

// If we just asked for a wrap-up, tag this message as complete so future turns exit interview mode
if (willAppendInterviewCompleteTag) {
visibleReply = `${visibleReply}\n<!-- interview-complete -->`;
}

/* If we executed any ops, append a quick summary */
if (opsConfirmations.length) {
visibleReply +=
@@ -1972,7 +2178,6 @@ if (opsConfirmations.length) {

// 5) Default: Just return raw text to front-end
let replyToClient = visibleReply;
let createdMilestonesData = [];

// ── NEW: pull out the first JSON object/array even if text precedes it ──
const firstBrace = rawReply.search(/[{\[]/); // first “{” or “[”
@@ -1985,29 +2190,46 @@ if (firstBrace !== -1 && lastJsonEdge > firstBrace) {
embeddedJson = rawReply.slice(firstBrace, lastJsonEdge + 1).trim();
}

// … then change the existing check:
if (embeddedJson) { // <── instead of startsWith("{")…
try {
const planObj = JSON.parse(embeddedJson);
if (embeddedJson) {
try {
const planObj = JSON.parse(embeddedJson);
if (planObj && Array.isArray(planObj.milestones)) {
const batchSeen = new Set();

// The AI plan is expected to have: planObj.milestones[]
if (planObj && Array.isArray(planObj.milestones)) {
for (const milestone of planObj.milestones) {
const dupKey = `${(milestone.title || "").trim()}|${milestone.date}`;
if (existingTitles.includes(dupKey)) {
console.log("Skipping duplicate milestone:", dupKey);
continue; // do NOT insert
for (const milestone of planObj.milestones) {
const tNorm = String(milestone.title || '').trim().toLowerCase();
if (!tNorm) continue;

if (batchSeen.has(tNorm)) {
console.log('Skipping in-batch duplicate title:', milestone.title);
continue;
}
// Create the milestone
const milestoneBody = {
title: milestone.title,
description: milestone.description || "",
date: milestone.date,
career_profile_id: scenarioRow.id, // or scenarioRow.career_profile_id
status: "planned",
progress: 0,
is_universal: false
};
batchSeen.add(tNorm);

const dupKey = `${(milestone.title || '').trim()}|${milestone.date}`;
if (existingTitles.includes(dupKey)) {
console.log('Skipping duplicate milestone:', dupKey);
continue; // do NOT insert
}

// Skip generic titles
const GENERIC_RX =
/^(complete (leadership|management) (training|course|program)|seek mentorship|network( with|ing)?( (current|senior|management) (managers|professionals))?|attend (a )?networking event)$/i;
if (GENERIC_RX.test(milestone.title || '')) {
console.log('Skipping generic title:', milestone.title);
continue;
}

// Create the milestone
const milestoneBody = {
title: milestone.title,
description: milestone.description || '',
date: milestone.date,
career_profile_id: scenarioRow.id,
status: 'planned',
progress: 0,
is_universal: false,
};

// Call your existing milestone endpoint
const msRes = await auth(req, '/premium/milestone', {
@@ -2101,7 +2323,7 @@ if (embeddedJson) { // <── instead of startsWith("{")…
if (createdMilestonesData.length > 0) {
replyToClient = `
I've created ${createdMilestonesData.length} milestones (with tasks & impacts) for you in this scenario.
Check your Milestones tab. Let me know if you want any changes!
Check your Milestones section below-you may need to refresh the browser. Let me know if you want any changes!
`.trim();
}
}
@@ -2245,7 +2467,6 @@ Always end with: “AptivaAI is an educational tool – not advice.”
if (!realKeys.length) payloadObj = null;

/* 6️⃣ persist changes */
const apiBase = process.env.APTIVA_INTERNAL_API || 'http://localhost:5002/api';

if (payloadObj?.cloneScenario) {
/* ------ CLONE ------ */
@@ -2336,7 +2557,7 @@ app.get('/api/premium/retire/chat/threads/:id', authenticatePremiumUser, async (
/* POST a message (auto-create thread if missing) */
app.post('/api/premium/retire/chat/threads/:id/messages', authenticatePremiumUser, async (req, res) => {
const { id } = req.params;
const { content = '', context = {} } = req.body || {};
const { content = '', context = {}, role = 'user' } = req.body || {};
if (!content.trim()) return res.status(400).json({ error: 'empty' });

// ensure thread exists (auto-create if missing)
@@ -2351,23 +2572,33 @@ app.post('/api/premium/retire/chat/threads/:id/messages', authenticatePremiumUse
);
}

// save user msg
await pool.query(
'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, "user", ?)',
[id, req.id, content]
);
// persist only visible user messages; hidden quick-action prompts come in as role="system"
if (role !== 'system') {
await pool.query(
'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, ?, ?)',
[id, req.id, role === 'assistant' ? 'assistant' : 'user', content]
);
}

// history (≤40)
const [history] = await pool.query(
'SELECT role,content FROM ai_chat_messages WHERE thread_id=? ORDER BY id ASC LIMIT 40',
// Get the latest 40, then restore chronological order
const [historyRows] = await pool.query(
'SELECT id, role, content FROM ai_chat_messages WHERE thread_id=? ORDER BY id DESC LIMIT 40',
[id]
);
const history = historyRows.reverse().map(({ role, content }) => ({ role, content }));

// If the caller provided a transient system card (quick action), append it only for this AI turn
const effectiveHistory =
role === 'system'
? [...history, { role: 'system', content }]
: history;
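// Note (descriptive, not part of the commit): the transient quick-action card is
// appended to effectiveHistory only for this single AI call; because role === 'system'
// messages are never INSERTed above, the card is not written to ai_chat_messages
// and will not reappear in later turns.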

// call AI
const resp = await internalFetch(req, '/premium/retirement/aichat', {
method : 'POST',
headers: { 'Content-Type':'application/json' },
body : JSON.stringify({ prompt: content, scenario_id: context?.scenario_id, chatHistory: history })
body : JSON.stringify({ prompt: content, scenario_id: context?.scenario_id, chatHistory: effectiveHistory })
});

let reply = 'Sorry, please try again.';
@@ -2430,7 +2661,15 @@ app.get('/api/premium/coach/chat/threads/:id', authenticatePremiumUser, async (r
/* POST a message (auto-create thread if missing) */
app.post('/api/premium/coach/chat/threads/:id/messages', authenticatePremiumUser, async (req, res) => {
const { id } = req.params;
const { content = '', context = {} } = req.body || {};
const {
content = '',
context = {},
role = 'user',
// NEW: honor one-turn context boost from quick actions
forceContext = false,
// NEW: persist the visible “Sure! …” note when the client sends a system card
assistantNote = null
} = req.body || {};
if (!content.trim()) return res.status(400).json({ error: 'empty' });

// ensure thread exists (auto-create if missing)
@@ -2445,29 +2684,55 @@ app.post('/api/premium/coach/chat/threads/:id/messages', authenticatePremiumUser
);
}

// save user msg
await pool.query(
'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, "user", ?)',
[id, req.id, content]
);
// persist only visible user messages; hidden quick-action prompts come in as role="system"
if (role !== 'system') {
await pool.query(
'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, ?, ?)',
[id, req.id, role === 'assistant' ? 'assistant' : 'user', content]
);
}

// NEW: When a quick action is triggered (role === 'system'), also persist the visible assistant note
if (role === 'system' && assistantNote && assistantNote.trim()) {
await pool.query(
'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, "assistant", ?)',
[id, req.id, assistantNote.trim()]
);
}

// Get the newest N rows so the model always sees the latest turns,
// then reverse back to chronological.
const [historyRows] = await pool.query(
'SELECT id, role, content FROM ai_chat_messages WHERE thread_id=? ORDER BY id DESC LIMIT 120',
[id]
);
const history = historyRows.reverse().map(({ role, content }) => ({ role, content }));

// If the caller provided a transient system card (quick action), append it only for this AI turn
const effectiveHistory =
role === 'system'
? [...history, { role: 'system', content }]
: history;

// history (≤40)
const [history] = await pool.query(
'SELECT role,content FROM ai_chat_messages WHERE thread_id=? ORDER BY id ASC LIMIT 40',
[id]
);

// call AI
const resp = await internalFetch(req, '/premium/ai/chat', {
method : 'POST',
headers: { 'Content-Type':'application/json' },
body : JSON.stringify({ ...context, chatHistory: history })
});
let resp;
try {
resp = await internalFetch(req, '/premium/ai/chat', {
method : 'POST',
headers: { 'Content-Type':'application/json' },
body : JSON.stringify({ ...context, chatHistory: effectiveHistory, forceContext })
});
} catch (e) {
console.error('internalFetch(/premium/ai/chat) failed:', e);
return res.status(502).json({ error: 'upstream_failed' });
}

let reply = 'Sorry, please try again.';
if (resp.ok) {
const json = await resp.json();
reply = (json?.reply || '').trim() || reply;
const created = Array.isArray(json?.createdMilestones) ? json.createdMilestones : [];

// save AI reply
await pool.query(
@@ -2476,7 +2741,8 @@ app.post('/api/premium/coach/chat/threads/:id/messages', authenticatePremiumUser
);
await pool.query('UPDATE ai_chat_threads SET updated_at=CURRENT_TIMESTAMP WHERE id=?', [id]);

return res.json({ reply });
// NEW: surface created milestones to the frontend so it can refresh Roadmap
return res.json({ reply, createdMilestones: created });
} else {
return res.status(502).json({ error: 'upstream_failed' });
}
@@ -4096,6 +4362,8 @@ app.post('/api/premium/tasks', authenticatePremiumUser, async (req, res) => {
return new Date(utc).toISOString();
}

const finalDue = due_date || null;

if (finalDue) { // only if task has a due date (incl. fallback)
const [[profile]] = await pool.query(
'SELECT phone_e164, phone_verified_at, sms_reminders_opt_in FROM user_profile WHERE id = ?',
@@ -27,31 +27,24 @@ async function ensureCoachThread() {

const isHiddenPrompt = (m) => {
if (!m || !m.content) return false;
const c = String(m.content);
// Heuristics that match your hidden prompts / modes
return (
m.role === 'system' ||
c.startsWith('# ⛔️') ||
c.startsWith('MODE :') ||
c.startsWith('MODE:') ||
c.includes('"milestones"') && c.includes('"tasks"') && c.includes('"date"') && c.includes('"title"')
);
const c = String(m.content).trim();
// Hide only true system/driver cards or ops payloads — not normal assistant prose.
const looksLikeOpsBlock = /^```ops/i.test(c);
const looksLikeDriver = c.startsWith('# ⛔️') || c.startsWith('MODE :') || c.startsWith('MODE:');
return m.role === 'system' || looksLikeDriver || looksLikeOpsBlock;
};

function buildInterviewPrompt(careerName, jobDescription = "") {
return `
You are an expert interviewer for the role **${careerName}**.
Ask one challenging behavioural or technical question **specific to this career**,
wait for the candidate's reply, then:

• Score the answer 1–5
• Give concise feedback (1-2 sentences)
• Ask the next question (up to 5 total)

After 5 questions or if the user types "quit interview", end the session.

Do NOT output milestones JSON.`;
}
function buildOneShotInterviewPrompt(careerName, jobDescription = "") {
return `
MODE: interview_one_shot
You are an expert interviewer for the role **${careerName}**${jobDescription ? ` (context: ${jobDescription})` : ''}.
Produce EXACTLY ONE interview question tailored to this role.
Output EXACTLY:
Interview question: <one specific question>
<!-- interview-oneshot-q -->
No preface, no numbering, no extra text, no scoring, no follow‑ups.
Do NOT output milestones JSON.`.trim();
}

/* ----------------------------------------------
Hidden prompts for the quick-action buttons
@@ -327,6 +320,7 @@ I'm here to support you with personalized coaching. What would you like to focus
/* ------------ shared AI caller ------------- */
async function callAi(updatedHistory, opts = {}) {
setLoading(true);
let replyText = ''; // the visible text we’ll append once
try {
let id = threadId; // <-- declare it
if (!id) { // first send or race
@@ -340,24 +334,81 @@ I'm here to support you with personalized coaching. What would you like to focus
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
content: updatedHistory.at(-1)?.content || '',
context
})
content : updatedHistory.at(-1)?.content || '',
role : updatedHistory.at(-1)?.role || 'user',
context,
// allow one-turn context push for Quick Actions
forceContext : Boolean(opts.forceContext),
// NEW: persist the visible “Sure! …” assistant line into the thread history
assistantNote: opts.assistantNote || null
}),
// Quick Actions can take ~10s while milestones + tasks are created
timeoutMs: 30000,
retryNonIdempotent: true
});
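// Note (descriptive, not part of the commit): the request body now also carries
// role (so hidden quick-action prompts arrive as role:"system" and are not
// persisted), forceContext (a one-turn detailed-context push), and assistantNote
// (the visible "Sure! …" line the backend stores as an assistant message when a
// quick action sends a system card).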

let reply = 'Sorry, something went wrong.';
if (r.ok && (r.headers.get('content-type') || '').includes('application/json')) {
const data = await r.json();
reply = (data?.reply || '').trim() || reply;
if (r && r.status >= 200 && r.status < 300) {
// 1) Try to read counts and reply from the JSON (if present)
try {
const ct = (r.headers.get('content-type') || '').toLowerCase();
if (ct.includes('application/json')) {
const data = await r.json();
const created = Array.isArray(data?.createdMilestones) ?
data.createdMilestones.length : 0;

if (created > 0 && typeof onMilestonesCreated === 'function')
onMilestonesCreated(created);

replyText = (data?.reply || '').trim(); // may be empty if the model only emitted JSON

if (created > 0) {
try { window.dispatchEvent(new CustomEvent('aptiva:milestones:changed', { detail:{ scenarioId:
scenarioRow?.id, count: created } }));
}
catch {}
}
}
} catch {/* fine; we’ll pull from thread */}

// 2) Only fall back to thread read if we didn't get a reply in JSON
if (!replyText) {
try {
const th = await authFetch(`/api/premium/coach/chat/threads/${id}`, { timeoutMs: 30000 });
if (th.ok && (th.headers.get('content-type') || '').includes('application/json')) {
const { messages: msgs = [] } = await th.json();
const visible = (Array.isArray(msgs) ? msgs : []).filter(m => !isHiddenPrompt(m));
const lastAssistant = [...visible].reverse().find(m => m.role === 'assistant');
if (lastAssistant?.content) replyText = String(lastAssistant.content);
}
} catch {/* ignore */}
}
} else {
// 🔁 Salvage: the backend may have saved the reply or created milestones before erroring
try {
const th = await authFetch(`/api/premium/coach/chat/threads/${id}`, { timeoutMs: 30000 });
if (th.ok && (th.headers.get('content-type') || '').includes('application/json')) {
const { messages: msgs = [] } = await th.json();
const visible = (Array.isArray(msgs) ? msgs : []).filter(m => !isHiddenPrompt(m));
const lastAssistant = [...visible].reverse().find(m => m.role === 'assistant');
if (lastAssistant?.content) replyText = String(lastAssistant.content);
}
} catch {}
if (!replyText) {
setMessages(prev => [...prev, { role: 'assistant', content: 'Sorry, something went wrong.' }]);
}
setMessages(prev => [...prev, { role: 'assistant', content: reply }]);
} catch (e) {
console.error(e);
setMessages(prev => [...prev, { role: 'assistant', content: 'Sorry, something went wrong.' }]);
} finally {
setLoading(false);
}
}
}
} catch (e) {
if (e?.name !== 'AbortError') { console.error(e);
setMessages(prev => [...prev, { role: 'assistant', content: 'Sorry, something went wrong.' }]);
}
} finally {
setLoading(false);
if (replyText) {
setMessages(prev => [...prev, { role: 'assistant', content: replyText }]);
}
}
}

/* ------------ normal send ------------- */
function handleSubmit(e) {
@@ -384,13 +435,12 @@ I'm here to support you with personalized coaching. What would you like to focus
?.content.split("\n").slice(2).join("\n") || "";

/* 1) Mock-Interview (special flow) */
if (type === "interview") {
const desc = scenarioRow?.job_description || "";
const hiddenSystem = { role:"system", content: buildInterviewPrompt(careerName, desc) };
const note = { role:"assistant", content:`Starting mock interview on **${careerName}**. Answer each question and I'll give feedback!` };
const updated = [...messages, note, hiddenSystem];
setMessages([...messages, note]);
callAi(updated);
if (type === "interview") {
const desc = scenarioRow?.job_description || "";
const hiddenSystem = { role:"system", content: buildOneShotInterviewPrompt(careerName, desc) };
// No visible preface; just ask the question.
const updated = [...messages, hiddenSystem];
callAi(updated, { forceContext: true });
return;
}

@@ -414,7 +464,10 @@ I'm here to support you with personalized coaching. What would you like to focus
const updated = [...messages, note, hiddenSystem];
setMessages([...messages, note]);
const needsContext = ["networking", "jobSearch", "aiGrowth"].includes(type);
callAi(updated, {forceContext: needsContext});
callAi(updated, {
forceContext: needsContext,
assistantNote: note.content
});
}

@@ -576,24 +576,6 @@ useEffect(() => {
return () => clearTimeout(timer);
}, [buttonDisabled]);

/* ------------------------------------------------------------------
* 1) Restore AI recommendations (unchanged behaviour)
* -----------------------------------------------------------------*/
useEffect(() => {
const json = localStorage.getItem('aiRecommendations');
if (!json) return;

try {
const arr = JSON.parse(json).map((m) => ({
...m,
id: m.id || crypto.randomUUID()
}));
setRecommendations(arr);
} catch (err) {
console.error('Error parsing stored AI recs', err);
}
}, []);

/* ------------------------------------------------------------------
* 2) Whenever the careerProfileId changes, clear the modal check flag
* -----------------------------------------------------------------*/
@@ -1370,6 +1352,18 @@ const handleMilestonesCreated = useCallback(
[fetchMilestones]
);

// Refresh milestones when the chat layer announces changes
useEffect(() => {
const handler = (e) => {
// If an event specifies a scenarioId, ignore other scenarios
const incoming = e?.detail?.scenarioId;
if (incoming && String(incoming) !== String(careerProfileId)) return;
fetchMilestones();
};
window.addEventListener('aptiva:milestones:changed', handler);
return () => window.removeEventListener('aptiva:milestones:changed', handler);
}, [careerProfileId, fetchMilestones]);

return (
<div className="milestone-tracker max-w-screen-lg mx-auto px-4 py-6 space-y-4">

@@ -58,6 +58,8 @@ export default function ChatDrawer({
const [prompt, setPrompt] = useState('');
const [messages, setMessages] = useState([]); // { role, content }
const listRef = useRef(null);
const SUPPORT_INTRO =
"Hi — Aptiva Support here. I can help with CareerExplorer, account/billing, or technical issues. What do you need?";

/* auto-scroll on incoming messages */
useEffect(() => {
@@ -66,25 +68,27 @@ export default function ChatDrawer({
}, [messages]);

useEffect(() => {
(async () => {
try {
const id = await ensureSupportThread();
setSupportThreadId(id);
// preload messages
const r = await fetch(`/api/chat/threads/${id}`, { credentials:'include' });
if (r.ok) {
const { messages: msgs } = await r.json();
setMessages(msgs || []);
} else {
// don’t crash UI on preload failure
setMessages([]);
(async () => {
try {
const id = await ensureSupportThread();
setSupportThreadId(id);
// preload messages
const r = await fetch(`/api/chat/threads/${id}`, { credentials: 'include' });
if (r.ok) {
const { messages: msgs } = await r.json();
setMessages((Array.isArray(msgs) && msgs.length)
? msgs
: [{ role: 'assistant', content: SUPPORT_INTRO }]);
} else {
// don’t crash UI on preload failure
setMessages([{ role: 'assistant', content: SUPPORT_INTRO }]);
}
} catch (e) {
console.error('[Support preload]', e);
setMessages([{ role: 'assistant', content: SUPPORT_INTRO }]);
}
} catch (e) {
console.error('[Support preload]', e);
setMessages([]);
}
})();
}, []);
})();
}, []);

/* helper: merge chunks while streaming */
const pushAssistant = (chunk) =>
@@ -232,6 +236,10 @@ export default function ChatDrawer({
</div>

<div className="border-t p-4">
{/* Persistent disclaimer */}
<div className="text-xs text-gray-600 mb-2">
⚠ Aptiva bots may be incomplete or inaccurate. Verify important details before acting.
</div>
<form
onSubmit={(e) => {
e.preventDefault();