Coach and chatbot fixes
All checks were successful
ci/woodpecker/manual/woodpecker Pipeline was successful
This commit is contained in: parent caa78298ec, commit 8ac77b6ae1
@@ -1 +1 @@
-c5704005f291ecf264cdc92403119e7db5831e61-372bcf506971f56c4911b429b9f5de5bc37ed008-e9eccd451b778829eb2f2c9752c670b707e1268b
+960faad1696b81c0f004065ea713edaef64ab816-372bcf506971f56c4911b429b9f5de5bc37ed008-e9eccd451b778829eb2f2c9752c670b707e1268b
@@ -1744,6 +1744,13 @@ app.post('/api/chat/threads', authenticateUser, async (req, res) => {
   await pool.query(
     'INSERT INTO ai_chat_threads (id,user_id,bot_type,title) VALUES (?,?, "support", ?)',
     [id, userId, title]
+  );
+  // Seed a first assistant message so the drawer never appears blank
+  const intro =
+    'Hi — Aptiva Support here. I can help with CareerExplorer, account/billing, or technical issues. What do you need?';
+  await pool.query(
+    'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, "assistant", ?)',
+    [id, userId, intro]
   );
   res.json({ id, title });
 });
@@ -1347,7 +1347,11 @@ app.delete('/api/premium/career-profile/:careerProfileId', authenticatePremiumUs
   }
 });

-app.post('/api/premium/ai/chat', authenticatePremiumUser, chatGate('coach'), async (req, res) => {
+app.post(
+  '/api/premium/ai/chat',
+  authenticatePremiumUser,
+  chatGate('coach'),
+  async (req, res) => {
   try {
     const {
       userProfile = {},
@@ -1355,30 +1359,30 @@ app.post('/api/premium/ai/chat', authenticatePremiumUser, chatGate('co
       financialProfile = {},
       collegeProfile = {},
       chatHistory = [],
-      forceContext = false
+      forceContext = false,
     } = req.body;

     let existingTitles = [];
-    let miniGrid = "-none-"; // slim grid
+    let miniGrid = '-none-'; // slim grid for current milestones

+    // Fetch existing milestones for this user + scenario
     try {
       const [rows] = await pool.query(
         `SELECT id, DATE_FORMAT(date,'%Y-%m-%d') AS d, title
            FROM milestones
           WHERE user_id = ? AND career_profile_id = ?`,
         [req.id, scenarioRow.id]
       );

-      existingTitles = rows.map(r => `${r.title.trim()}|${r.d}`);
+      existingTitles = rows.map((r) => `${r.title.trim()}|${r.d}`);

       if (rows.length) {
         miniGrid = rows
-          .map(r => `${r.id}|${r.title.trim()}|${r.d}`)
-          .join("\n");
+          .map((r) => `${r.id}|${r.title.trim()}|${r.d}`)
+          .join('\n');
       }
     } catch (e) {
-      console.error("Could not fetch existing milestones ⇒", e);
+      console.error('Could not fetch existing milestones ⇒', e);
     }

     // ------------------------------------------------
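The slim grid injected into the prompt and the dedupe keys compared later are plain pipe-delimited strings. A minimal sketch of both shapes, using invented sample rows:

```js
// Sketch only: sample rows are invented to show the string shapes used above.
const rows = [
  { id: 'a1b2', d: '2025-09-01', title: 'Finish SQL course' },
  { id: 'c3d4', d: '2025-10-15', title: 'Update resume' },
];

// Dedupe keys the route compares against incoming milestones: "title|date"
const existingTitles = rows.map((r) => `${r.title.trim()}|${r.d}`);

// Slim grid injected into the prompt: one "id|title|date" row per line
const miniGrid = rows.map((r) => `${r.id}|${r.title.trim()}|${r.d}`).join('\n');

console.log(existingTitles); // [ 'Finish SQL course|2025-09-01', 'Update resume|2025-10-15' ]
console.log(miniGrid);
// a1b2|Finish SQL course|2025-09-01
// c3d4|Update resume|2025-10-15
```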
@@ -1433,7 +1437,7 @@ app.post('/api/premium/ai/chat', authenticatePremiumUser, chatGate('co

     // Friendly note - feel free to tweak the wording
     const friendlyNote = `
-Feel free to use AptivaAI however it best suits you—there’s no "wrong" answer.
+Feel free to use AptivaAI however it best suits you—there’s no "wrong" answer but Coach is designed to provide you with actionable steps towards your goals.
 It doesn’t matter so much where you've been; it's about where you want to go from here.
 We can refine details any time or jump straight to what you’re most eager to explore right now.
@@ -1445,6 +1449,22 @@ I'm here to support you with personalized coaching—what would you like to focu
       return `${combinedDescription}\n\n${friendlyNote}`;
     }

+    // One‑shot interview helpers
+    const getLastAssistant = (hist) => {
+      if (!Array.isArray(hist)) return null;
+      for (let i = hist.length - 1; i >= 0; i--) {
+        const m = hist[i];
+        if (m && m.role === 'assistant' && typeof m.content === 'string') return m;
+      }
+      return null;
+    };
+    const stripTags = (s='') => s.replace(/<[^>]+>/g, '');
+    const lastAssistant = getLastAssistant(chatHistory);
+    const lastAssistantText = stripTags(lastAssistant?.content || '');
+    const lastAssistantIsOneShotQ =
+      /<!--\s*interview-oneshot-q\s*-->/.test(lastAssistant?.content || '') ||
+      /^Interview question\s*:/i.test(lastAssistantText);
+
     // B. Build a user summary that references all available info (unchanged from your code)
     function buildUserSummary({
       userProfile = {},
@@ -1633,7 +1653,6 @@ ${econText}
     // ------------------------------------------------
     // 2. AI Risk Fetch
     // ------------------------------------------------
-    const apiBase = process.env.APTIVA_INTERNAL_API || "http://localhost:5002/api";
     let aiRisk = null;
     try {
       const aiRiskRes = await auth(
@@ -1713,8 +1732,10 @@ Our mission is to help people grow *with* AI rather than be displaced by it.
 Speak in a warm, encouraging tone, but prioritize *specific next steps* over generic motivation.
 Validate ambitions, break big goals into realistic milestones, and show how AI can be a collaborator.

-Finish every reply with **one concrete suggestion or question** that moves the plan forward.
+Finish every reply with **one concise follow‑up** that stays on the **same topic as the user’s last message**.
+Do **not** propose or ask about roadmaps/milestones/interviews unless the user **explicitly asked** (or pressed a quick‑action button).
 Never ask for info you already have unless you truly need clarification.

 `.trim();

@@ -1800,6 +1821,17 @@ ${combinedStatusSituation}
 ${summaryText}
 `.trim();

+    const systemPromptDirectQA = `
+Answer the user's **last message directly and specifically**.
+
+Rules:
+• Start with the direct answer in 1–3 short paragraphs or a clear bullet list.
+• Stay on the user’s topic; do **not** propose or ask about roadmaps, milestones, or interviews unless the user explicitly asked in the last turn.
+• Use the user's region (${userProfile?.area || userProfile?.state || 'their region'}) when location matters.
+• If key data is missing, say what you need and ask **one** precise follow‑up.
+• Avoid repeating the same sentence you used previously (e.g., “I can create a roadmap…”).`.trim();
+
     const dynMilestonePrompt = `
 [CURRENT MILESTONES]
 Use **exactly** the UUID at the start of each line when you refer to a milestone
@@ -1870,57 +1902,204 @@ If you’re asked for short-term dates, they still must be ≥ ${todayISO}.
 Reject or re-ask if the user insists on a past date.
 `.trim();

-    const avoidBlock = existingTitles.length
-      ? "\nAVOID any milestone whose title matches REGEXP /" +
-        existingTitles.map(t => `(?:${t.split("|")[0].replace(/[.*+?^${}()|[\]\\]/g,"\\$&")})`)
-        .join("|") + "/i"
-      : "";
-
-    const recentHistory = chatHistory.slice(-MAX_CHAT_TURNS);
-    const firstTurn = chatHistory.length === 0;
-
-    const STATIC_SYSTEM_CARD = `
-${systemPromptIntro}
-
-${systemPromptOpsCheatSheet}
-
-/* Milestone JSON spec, date guard, and avoid-list */
-${systemPromptMilestoneFormat}
-${systemPromptDateGuard}
-${avoidBlock}
-`.trim();
-
-    const NEEDS_OPS_CARD = !chatHistory.some(
-      m => m.role === "system" && m.content.includes("APTIVA OPS CHEAT-SHEET")
-    );
-
-    const NEEDS_CTX_CARD = !chatHistory.some(
-      m => m.role === "system" && m.content.startsWith("[DETAILED USER PROFILE]")
-    );
-
-    const SEND_CTX_CARD = forceContext || NEEDS_CTX_CARD;
-
+    const avoidBlock =
+      existingTitles.length
+        ? `\nAVOID any milestone whose title matches REGEXP /${existingTitles
+            .map((t) =>
+              `(?:${t.split('|')[0].replace(/[.*+?^${}()|[\]\\]/g, '\\$&')})`
+            )
+            .join('|')}/i`
+        : '';
+
+    const MAX_TURNS = 20;
     const messagesToSend = [];

-    // ① Large, unchanging card – once per conversation
-    if (NEEDS_OPS_CARD) {
-      messagesToSend.push({ role: "system", content: STATIC_SYSTEM_CARD });
+    // Intro card only at conversation start (lightweight)
+    if ((chatHistory?.length || 0) < 2) {
+      messagesToSend.push({ role: "system", content: systemPromptIntro });
     }

-    if (NEEDS_CTX_CARD || SEND_CTX_CARD)
-      messagesToSend.push({ role:"system", content: summaryText });
+    // Always include detailed context at start or when explicitly requested
+    if ((chatHistory?.length || 0) < 2 || forceContext) {
+      messagesToSend.push({ role: "system", content: systemPromptDetailedContext });
+    }

-    // ② Per-turn contextual helpers (small!)
+    // Per-turn, always small helpers
     messagesToSend.push(
       { role: "system", content: systemPromptStatusSituation },
-      { role: "system", content: dynMilestonePrompt } // <-- grid replaces two old lines
     );

-    // ③ Recent conversational context
-    messagesToSend.push(...chatHistory.slice(-MAX_CHAT_TURNS));
+    // Is the latest turn a Quick Action (we inject a transient MODE:... system card)?
+    const lastTurn = Array.isArray(chatHistory) ? chatHistory[chatHistory.length - 1] : null;
+
+    // Find the last user message content (plain string)
+    const lastUserMsg = (() => {
+      if (!Array.isArray(chatHistory)) return '';
+      for (let i = chatHistory.length - 1; i >= 0; i--) {
+        const m = chatHistory[i];
+        if (m && m.role === 'user' && typeof m.content === 'string') {
+          return m.content.trim();
+        }
+      }
+      return '';
+    })();
+
+    // Very small intent heuristic for "info questions" (salary, duties, etc.)
+    const INFO_Q_RX = /\b(salary|pay|compensation|starting\s*salary|median|range|job\s*description|dut(y|ies)|requirements?|skills|responsibilit(y|ies)|what\s+is|how\s+much|how\s+do)\b/i;
+    const isInfoQuestion = !!lastUserMsg && INFO_Q_RX.test(lastUserMsg);
+
+    // —— Interview state helpers ——
+
+    // Find most recent "Starting mock interview" assistant note (persisted by frontend)
+    function findInterviewStartIdx(history) {
+      const h = Array.isArray(history) ? history : [];
+      for (let i = h.length - 1; i >= 0; i--) {
+        const m = h[i];
+        if (m.role === 'assistant' && /Starting mock interview/i.test(m.content || '')) {
+          return i;
+        }
+      }
+      return -1;
+    }
+
+    // Count questions asked and capture the last asked text
+    function extractInterviewProgress(history) {
+      const asked = [];
+      for (const m of history) {
+        if (m.role !== 'assistant' || typeof m.content !== 'string') continue;
+        const t = m.content.replace(/<[^>]+>/g, ''); // strip tags
+        // Accept several formats: "Next question:", "**Question 3:**", "Question 3:"
+        const rx = /(?:^|\n)\s*(?:Next question:|\*{0,2}\s*Question\s+\d+\s*:)\s*(.+)/gi;
+        let match;
+        while ((match = rx.exec(t))) asked.push(match[1].trim());
+      }
+      const lastAskedText = asked.length ? asked[asked.length - 1] : '';
+      return { askedCount: asked.length, lastAskedText };
+    }
+
+    function wasInterviewJustEnded(history) {
+      return (history || []).some(
+        (m) => m.role === 'assistant' && /<!--\s*interview-complete\s*-->/i.test(m.content || '')
+      );
+    }
+
+    // Last assistant message (to prevent repeating the pitch)
+    const lastAssistantMsg = (() => {
+      if (!Array.isArray(chatHistory)) return '';
+      for (let i = chatHistory.length - 1; i >= 0; i--) {
+        const m = chatHistory[i];
+        if (m && m.role === 'assistant' && typeof m.content === 'string') {
+          return m.content.trim();
+        }
+      }
+      return '';
+    })();
+    const ASSISTANT_PITCHED_PLAN = /roadmap|milestone|plan\b/i.test(lastAssistantMsg);
+
+    // Parse MODE on the last transient system card (if any)
+    const modeMatch =
+      lastTurn && lastTurn.role === 'system'
+        ? /MODE\s*:\s*([A-Za-z_]+)/i.exec(lastTurn.content || '')
+        : null;
+    const MODE = (modeMatch ? modeMatch[1] : '').toLowerCase();
+
+    // One‑shot *start* (ask exactly one question)
+    const IS_ONESHOT_START = MODE === 'interview_one_shot';
+    // One‑shot *evaluation* (user replied to last one‑shot question)
+    const IS_ONESHOT_EVAL = !IS_ONESHOT_START && lastAssistantIsOneShotQ;
+
+    const PLAN_MODES = new Set(['networking_plan', 'job_search_plan', 'ai_growth']);
+
+    const IS_PLAN_TURN = PLAN_MODES.has(MODE);
+    const IS_INTERVIEW_TURN = MODE === 'interview';
+
+    // Detect an ongoing interview even if the MODE card wasn’t sent this turn
+    const recent = (chatHistory || []).slice(-20);
+    const hasRecentInterviewCard = recent.some(
+      (m) => m.role === 'system' && /MODE\s*:\s*interview/i.test(m.content || '')
+    );
+    const lastAssistantRecent = [...recent].reverse().find((m) => m.role === 'assistant');
+    const assistantLooksInterview =
+      lastAssistantRecent &&
+      /(Next question:|^\s*\*{0,2}Question\s+\d+\s*:|Starting mock interview)/i.test(
+        (lastAssistantRecent.content || '').replace(/<[^>]+>/g, '')
+      );
+    const interviewEndedRecently = wasInterviewJustEnded(chatHistory);
+
+    // Active session?
+    const IN_INTERVIEW_SESSION = (IS_INTERVIEW_TURN || hasRecentInterviewCard || assistantLooksInterview) && !interviewEndedRecently;
+
+    // Interview progress + next index
+    const interviewStartIdx = findInterviewStartIdx(chatHistory);
+    const { askedCount: interviewQCount, lastAskedText } = extractInterviewProgress(chatHistory);
+    const nextQNumber = Math.min(5, interviewQCount + 1);
+    const userQuitInterview = /\b(quit|end|stop)\s+interview\b/i.test(lastUserMsg || '');
+    const INTERVIEW_COMPLETE = IN_INTERVIEW_SESSION && (interviewQCount >= 5 || userQuitInterview);
+
+    // Build the model window: either the interview segment, or the last MAX_TURNS
+    const INTERVIEW_SEGMENT_CAP = 100; // ~ ample for 5 Q/A pairs
+    const windowForModel = (() => {
+      if (IN_INTERVIEW_SESSION && interviewStartIdx !== -1) {
+        const start = Math.max(interviewStartIdx, chatHistory.length - INTERVIEW_SEGMENT_CAP);
+        return chatHistory.slice(start);
+      }
+      return (chatHistory || []).slice(-MAX_TURNS);
+    })();
+    messagesToSend.push(...windowForModel);
+
+    // Guards by mode
+    let willAppendInterviewCompleteTag = false;
+
+    if (IS_ONESHOT_START) {
+      // Hard guard: output *exactly* one question + sentinel comment
+      const oneShotAsk = `
+You are generating one interview question only.
+Output EXACTLY:
+Interview question: <one specific question>
+<!-- interview-oneshot-q -->
+No other text, no scoring, no numbering, no follow‑ups.
+`.trim();
+      messagesToSend.push({ role: 'system', content: oneShotAsk });
+
+    } else if (IS_ONESHOT_EVAL) {
+      // Evaluate the user's last message against the immediately preceding question
+      const oneShotEval = `
+Evaluate the user's most recent message as an answer to the immediately preceding interview question.
+Write exactly these four lines (plain text, no markdown):
+Score: X/5
+Why: <1–2 sentences>
+Strength: <short phrase>
+Improve: <one targeted suggestion>
+Do not ask another question. Do not propose roadmaps or milestones.
+<!-- interview-oneshot-done -->
+`.trim();
+      messagesToSend.push({ role: 'system', content: oneShotEval });
+
+    } else if (IS_PLAN_TURN) {
+      messagesToSend.push(
+        { role: "system", content: dynMilestonePrompt },
+        { role: "system", content: `${systemPromptMilestoneFormat}\n${systemPromptDateGuard}${avoidBlock}` }
+      );
+
+    } else {
+      // Normal chat guard (your existing one)
+      const systemPromptNormalReplyGuard = `
+You are in NORMAL CHAT mode (not plan creation).
+1) Answer the user's latest message directly in the first paragraph.
+2) Do NOT suggest creating a roadmap or milestones unless the user explicitly asks.
+3) Do NOT repeat the same offer or question from your previous reply.
+4) Keep it concise; add one helpful next question only if needed.
+${ASSISTANT_PITCHED_PLAN ? `5) Your last reply mentioned a roadmap/milestones — do not repeat that. Answer the user’s question directly.` : ``}
+${isInfoQuestion ? `6) The user asked an information question ("${lastUserMsg.slice(0,160)}"). Provide the requested facts. If you lack exact data, say so and ask ONE precise follow-up.` : ``}
+`.trim();
+      messagesToSend.push({ role: "system", content: systemPromptNormalReplyGuard });
+    }
+
+    // Recent conversational context (trimmed)
+    messagesToSend.push(...(chatHistory || []).slice(-MAX_TURNS));

     // ------------------------------------------------
     // 6. Call GPT (unchanged)
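The routing above hinges on a transient `MODE: ...` system card that the frontend appends for quick actions. A small sketch of how that card is parsed into a mode flag, using an invented two-message history:

```js
// Sketch of the MODE parsing above; the history contents are invented for illustration.
const chatHistory = [
  { role: 'user', content: 'Help me practice interviewing' },
  { role: 'system', content: 'MODE: interview_one_shot\nYou are an expert interviewer for this role.' },
];

const lastTurn = chatHistory[chatHistory.length - 1];
const modeMatch =
  lastTurn && lastTurn.role === 'system'
    ? /MODE\s*:\s*([A-Za-z_]+)/i.exec(lastTurn.content || '')
    : null;
const MODE = (modeMatch ? modeMatch[1] : '').toLowerCase();

console.log(MODE); // "interview_one_shot" -> the route pushes the one-question guard card
```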
@@ -1941,14 +2120,31 @@ messagesToSend.push(...chatHistory.slice(-MAX_CHAT_TURNS));
         reply: "Sorry, I didn't get a response. Could you please try again?"
       });
     }

-    /* 🔹 NEW: detect fenced ```ops``` JSON */
+    // Prepare containers BEFORE we do any parsing so TDZ can't bite us
+    let createdMilestonesData = [];
     let opsConfirmations = [];
+
+    /* 🔹 detect fenced ```ops``` JSON */
     const opsMatch = rawReply.match(/```ops\s*([\s\S]*?)```/i);
     if (opsMatch) {
       try {
         const opsObj = JSON.parse(opsMatch[1]);
         opsConfirmations = await applyOps(opsObj, req);
+
+        // NEW: if there were CREATE ops, reflect that back to the client
+        if (Array.isArray(opsObj?.milestones)) {
+          const createdOps = opsObj.milestones.filter(
+            (m) => String(m.op || '').toUpperCase() === 'CREATE'
+          );
+          if (createdOps.length) {
+            createdMilestonesData.push(
+              ...createdOps.map((m) => ({
+                milestoneId: null,
+                title: m?.data?.title || ''
+              }))
+            );
+          }
+        }
       } catch (e) {
         console.error("Could not parse ops JSON:", e);
       }
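The model is expected to emit milestone operations inside a fenced `ops` block, which the handler extracts, parses, and strips from the visible reply. A sketch with an invented reply; the fence string is built programmatically here only so the example stays readable:

```js
// Sketch only: rawReply is invented; it mirrors the fenced ops-block handling above.
const fence = '`'.repeat(3); // avoids writing a literal fence inside this example
const rawReply = [
  'Done. I updated your plan.',
  `${fence}ops`,
  '{ "milestones": [ { "op": "CREATE", "data": { "title": "Draft portfolio site" } } ] }',
  fence,
].join('\n');

// Same extraction the route performs:
const opsMatch = rawReply.match(new RegExp(`${fence}ops\\s*([\\s\\S]*?)${fence}`, 'i'));
if (opsMatch) {
  const opsObj = JSON.parse(opsMatch[1]);
  const created = (opsObj.milestones || []).filter(
    (m) => String(m.op || '').toUpperCase() === 'CREATE'
  );
  console.log(created.map((m) => m.data.title)); // [ 'Draft portfolio site' ]
}

// The UI only sees the reply with the ops block stripped out:
const visibleReply = rawReply.replace(new RegExp(`${fence}ops[\\s\\S]*?${fence}`, 'i'), '').trim();
console.log(visibleReply); // "Done. I updated your plan."
```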
@@ -1958,6 +2154,16 @@ if (opsMatch) {
     let visibleReply = rawReply.replace(/```ops[\s\S]*?```/i, "").trim();
     if (!visibleReply) visibleReply = "Done!";

+    // Normalize interview labels if the model tried to enumerate
+    visibleReply = visibleReply
+      .replace(/Here'?s your first question\s*:\s*/ig, 'Interview question: ')
+      .replace(/\*\*?\s*Question\s*\d+\s*:\s*/ig, 'Interview question: ');
+
+    // If we just asked for a wrap-up, tag this message as complete so future turns exit interview mode
+    if (willAppendInterviewCompleteTag) {
+      visibleReply = `${visibleReply}\n<!-- interview-complete -->`;
+    }
+
     /* If we executed any ops, append a quick summary */
     if (opsConfirmations.length) {
       visibleReply +=
@@ -1972,7 +2178,6 @@ if (opsConfirmations.length) {

     // 5) Default: Just return raw text to front-end
     let replyToClient = visibleReply;
-    let createdMilestonesData = [];

     // ── NEW: pull out the first JSON object/array even if text precedes it ──
     const firstBrace = rawReply.search(/[{\[]/); // first “{” or “[”
@@ -1985,28 +2190,45 @@ if (firstBrace !== -1 && lastJsonEdge > firstBrace) {
   embeddedJson = rawReply.slice(firstBrace, lastJsonEdge + 1).trim();
 }

-// … then change the existing check:
-if (embeddedJson) { // <── instead of startsWith("{")…
+if (embeddedJson) {
   try {
     const planObj = JSON.parse(embeddedJson);

-    // The AI plan is expected to have: planObj.milestones[]
     if (planObj && Array.isArray(planObj.milestones)) {
+      const batchSeen = new Set();

       for (const milestone of planObj.milestones) {
-        const dupKey = `${(milestone.title || "").trim()}|${milestone.date}`;
+        const tNorm = String(milestone.title || '').trim().toLowerCase();
+        if (!tNorm) continue;
+
+        if (batchSeen.has(tNorm)) {
+          console.log('Skipping in-batch duplicate title:', milestone.title);
+          continue;
+        }
+        batchSeen.add(tNorm);
+
+        const dupKey = `${(milestone.title || '').trim()}|${milestone.date}`;
         if (existingTitles.includes(dupKey)) {
-          console.log("Skipping duplicate milestone:", dupKey);
+          console.log('Skipping duplicate milestone:', dupKey);
           continue; // do NOT insert
         }

+        // Skip generic titles
+        const GENERIC_RX =
+          /^(complete (leadership|management) (training|course|program)|seek mentorship|network( with|ing)?( (current|senior|management) (managers|professionals))?|attend (a )?networking event)$/i;
+        if (GENERIC_RX.test(milestone.title || '')) {
+          console.log('Skipping generic title:', milestone.title);
+          continue;
+        }

         // Create the milestone
         const milestoneBody = {
           title: milestone.title,
-          description: milestone.description || "",
+          description: milestone.description || '',
           date: milestone.date,
-          career_profile_id: scenarioRow.id, // or scenarioRow.career_profile_id
-          status: "planned",
+          career_profile_id: scenarioRow.id,
+          status: 'planned',
           progress: 0,
-          is_universal: false
+          is_universal: false,
         };

         // Call your existing milestone endpoint
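The avoid-list used alongside this dedupe pass is one case-insensitive alternation built from existing titles, with regex metacharacters escaped before joining. A sketch with invented titles; `escapeForRegex` is just a local name for the inline `.replace(...)` the route uses:

```js
// Sketch of the avoid-list construction, with invented titles.
const existingTitles = ['Finish SQL course|2025-09-01', 'Update resume (v2)|2025-10-15'];

const escapeForRegex = (s) => s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');

const avoidBlock = existingTitles.length
  ? `\nAVOID any milestone whose title matches REGEXP /${existingTitles
      .map((t) => `(?:${escapeForRegex(t.split('|')[0])})`)
      .join('|')}/i`
  : '';

console.log(avoidBlock);
// AVOID any milestone whose title matches REGEXP /(?:Finish SQL course)|(?:Update resume \(v2\))/i
```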
@@ -2101,7 +2323,7 @@ if (embeddedJson) { // <── instead of startsWith("{")…
       if (createdMilestonesData.length > 0) {
         replyToClient = `
 I've created ${createdMilestonesData.length} milestones (with tasks & impacts) for you in this scenario.
-Check your Milestones tab. Let me know if you want any changes!
+Check your Milestones section below-you may need to refresh the browser. Let me know if you want any changes!
 `.trim();
       }
     }
@@ -2245,7 +2467,6 @@ Always end with: “AptivaAI is an educational tool – not advice.”
     if (!realKeys.length) payloadObj = null;

     /* 6️⃣ persist changes */
-    const apiBase = process.env.APTIVA_INTERNAL_API || 'http://localhost:5002/api';

     if (payloadObj?.cloneScenario) {
       /* ------ CLONE ------ */
@@ -2336,7 +2557,7 @@ app.get('/api/premium/retire/chat/threads/:id', authenticatePremiumUser, async (
 /* POST a message (auto-create thread if missing) */
 app.post('/api/premium/retire/chat/threads/:id/messages', authenticatePremiumUser, async (req, res) => {
   const { id } = req.params;
-  const { content = '', context = {} } = req.body || {};
+  const { content = '', context = {}, role = 'user' } = req.body || {};
   if (!content.trim()) return res.status(400).json({ error: 'empty' });

   // ensure thread exists (auto-create if missing)
@@ -2351,23 +2572,33 @@ app.post('/api/premium/retire/chat/threads/:id/messages', authenticatePremiumUse
     );
   }

-  // save user msg
-  await pool.query(
-    'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, "user", ?)',
-    [id, req.id, content]
-  );
+  // persist only visible user messages; hidden quick-action prompts come in as role="system"
+  if (role !== 'system') {
+    await pool.query(
+      'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, ?, ?)',
+      [id, req.id, role === 'assistant' ? 'assistant' : 'user', content]
+    );
+  }

-  // history (≤40)
-  const [history] = await pool.query(
-    'SELECT role,content FROM ai_chat_messages WHERE thread_id=? ORDER BY id ASC LIMIT 40',
+  // Get the latest 40, then restore chronological order
+  const [historyRows] = await pool.query(
+    'SELECT id, role, content FROM ai_chat_messages WHERE thread_id=? ORDER BY id DESC LIMIT 40',
     [id]
   );
+  const history = historyRows.reverse().map(({ role, content }) => ({ role, content }));
+
+  // If the caller provided a transient system card (quick action), append it only for this AI turn
+  const effectiveHistory =
+    role === 'system'
+      ? [...history, { role: 'system', content }]
+      : history;

   // call AI
   const resp = await internalFetch(req, '/premium/retirement/aichat', {
     method : 'POST',
     headers: { 'Content-Type':'application/json' },
-    body   : JSON.stringify({ prompt: content, scenario_id: context?.scenario_id, chatHistory: history })
+    body   : JSON.stringify({ prompt: content, scenario_id: context?.scenario_id, chatHistory: effectiveHistory })
   });

   let reply = 'Sorry, please try again.';
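Because the history query now reads `ORDER BY id DESC LIMIT 40`, the newest turns survive on long threads, and the rows must be reversed back to chronological order before they reach the model. A sketch with invented rows:

```js
// Sketch only: illustrates the "newest N, then reverse to chronological" pattern used above.
// Pretend these came back from `ORDER BY id DESC LIMIT 40`:
const historyRows = [
  { id: 12, role: 'assistant', content: 'Here is your updated projection.' },
  { id: 11, role: 'user', content: 'What if I retire at 62?' },
  { id: 10, role: 'assistant', content: 'Hi! How can I help with retirement planning?' },
];

// Reverse back to oldest-first, keeping only the fields the model needs.
const history = historyRows.reverse().map(({ role, content }) => ({ role, content }));

console.log(history.map((m) => m.role)); // [ 'assistant', 'user', 'assistant' ] in chronological order
```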
@@ -2430,7 +2661,15 @@ app.get('/api/premium/coach/chat/threads/:id', authenticatePremiumUser, async (r
 /* POST a message (auto-create thread if missing) */
 app.post('/api/premium/coach/chat/threads/:id/messages', authenticatePremiumUser, async (req, res) => {
   const { id } = req.params;
-  const { content = '', context = {} } = req.body || {};
+  const {
+    content = '',
+    context = {},
+    role = 'user',
+    // NEW: honor one-turn context boost from quick actions
+    forceContext = false,
+    // NEW: persist the visible “Sure! …” note when the client sends a system card
+    assistantNote = null
+  } = req.body || {};
   if (!content.trim()) return res.status(400).json({ error: 'empty' });

   // ensure thread exists (auto-create if missing)
@@ -2445,29 +2684,55 @@ app.post('/api/premium/coach/chat/threads/:id/messages', authenticatePremiumUser
     );
   }

-  // save user msg
-  await pool.query(
-    'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, "user", ?)',
-    [id, req.id, content]
-  );
+  // persist only visible user messages; hidden quick-action prompts come in as role="system"
+  if (role !== 'system') {
+    await pool.query(
+      'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, ?, ?)',
+      [id, req.id, role === 'assistant' ? 'assistant' : 'user', content]
+    );
+  }

-  // history (≤40)
-  const [history] = await pool.query(
-    'SELECT role,content FROM ai_chat_messages WHERE thread_id=? ORDER BY id ASC LIMIT 40',
+  // NEW: When a quick action is triggered (role === 'system'), also persist the visible assistant note
+  if (role === 'system' && assistantNote && assistantNote.trim()) {
+    await pool.query(
+      'INSERT INTO ai_chat_messages (thread_id,user_id,role,content) VALUES (?,?, "assistant", ?)',
+      [id, req.id, assistantNote.trim()]
+    );
+  }
+
+  // Get the newest N rows so the model always sees the latest turns,
+  // then reverse back to chronological.
+  const [historyRows] = await pool.query(
+    'SELECT id, role, content FROM ai_chat_messages WHERE thread_id=? ORDER BY id DESC LIMIT 120',
     [id]
   );
+  const history = historyRows.reverse().map(({ role, content }) => ({ role, content }));
+
+  // If the caller provided a transient system card (quick action), append it only for this AI turn
+  const effectiveHistory =
+    role === 'system'
+      ? [...history, { role: 'system', content }]
+      : history;

   // call AI
-  const resp = await internalFetch(req, '/premium/ai/chat', {
-    method : 'POST',
-    headers: { 'Content-Type':'application/json' },
-    body   : JSON.stringify({ ...context, chatHistory: history })
-  });
+  let resp;
+  try {
+    resp = await internalFetch(req, '/premium/ai/chat', {
+      method : 'POST',
+      headers: { 'Content-Type':'application/json' },
+      body   : JSON.stringify({ ...context, chatHistory: effectiveHistory, forceContext })
+    });
+  } catch (e) {
+    console.error('internalFetch(/premium/ai/chat) failed:', e);
+    return res.status(502).json({ error: 'upstream_failed' });
+  }

   let reply = 'Sorry, please try again.';
   if (resp.ok) {
     const json = await resp.json();
     reply = (json?.reply || '').trim() || reply;
+    const created = Array.isArray(json?.createdMilestones) ? json.createdMilestones : [];

     // save AI reply
     await pool.query(
@@ -2476,7 +2741,8 @@ app.post('/api/premium/coach/chat/threads/:id/messages', authenticatePremiumUser
     );
     await pool.query('UPDATE ai_chat_threads SET updated_at=CURRENT_TIMESTAMP WHERE id=?', [id]);

-    return res.json({ reply });
+    // NEW: surface created milestones to the frontend so it can refresh Roadmap
+    return res.json({ reply, createdMilestones: created });
   } else {
     return res.status(502).json({ error: 'upstream_failed' });
   }
@@ -4096,6 +4362,8 @@ app.post('/api/premium/tasks', authenticatePremiumUser, async (req, res) => {
     return new Date(utc).toISOString();
   }

+  const finalDue = due_date || null;
+
   if (finalDue) { // only if task has a due date (incl. fallback)
     const [[profile]] = await pool.query(
       'SELECT phone_e164, phone_verified_at, sms_reminders_opt_in FROM user_profile WHERE id = ?',
@@ -27,30 +27,23 @@ async function ensureCoachThread() {

 const isHiddenPrompt = (m) => {
   if (!m || !m.content) return false;
-  const c = String(m.content);
-  // Heuristics that match your hidden prompts / modes
-  return (
-    m.role === 'system' ||
-    c.startsWith('# ⛔️') ||
-    c.startsWith('MODE :') ||
-    c.startsWith('MODE:') ||
-    c.includes('"milestones"') && c.includes('"tasks"') && c.includes('"date"') && c.includes('"title"')
-  );
+  const c = String(m.content).trim();
+  // Hide only true system/driver cards or ops payloads — not normal assistant prose.
+  const looksLikeOpsBlock = /^```ops/i.test(c);
+  const looksLikeDriver = c.startsWith('# ⛔️') || c.startsWith('MODE :') || c.startsWith('MODE:');
+  return m.role === 'system' || looksLikeDriver || looksLikeOpsBlock;
 };

-function buildInterviewPrompt(careerName, jobDescription = "") {
+function buildOneShotInterviewPrompt(careerName, jobDescription = "") {
   return `
-You are an expert interviewer for the role **${careerName}**.
-Ask one challenging behavioural or technical question **specific to this career**,
-wait for the candidate's reply, then:
-
-• Score the answer 1–5
-• Give concise feedback (1-2 sentences)
-• Ask the next question (up to 5 total)
-After 5 questions or if the user types "quit interview", end the session.
-
-Do NOT output milestones JSON.`;
+MODE: interview_one_shot
+You are an expert interviewer for the role **${careerName}**${jobDescription ? ` (context: ${jobDescription})` : ''}.
+Produce EXACTLY ONE interview question tailored to this role.
+Output EXACTLY:
+Interview question: <one specific question>
+<!-- interview-oneshot-q -->
+No preface, no numbering, no extra text, no scoring, no follow‑ups.
+Do NOT output milestones JSON.`.trim();
 }

 /* ----------------------------------------------
@@ -327,6 +320,7 @@ I'm here to support you with personalized coaching. What would you like to focus
   /* ------------ shared AI caller ------------- */
   async function callAi(updatedHistory, opts = {}) {
     setLoading(true);
+    let replyText = ''; // the visible text we’ll append once
     try {
       let id = threadId; // <-- declare it
       if (!id) { // first send or race
@@ -341,23 +335,80 @@ I'm here to support you with personalized coaching. What would you like to focus
         headers: { 'Content-Type': 'application/json' },
         body: JSON.stringify({
           content : updatedHistory.at(-1)?.content || '',
-          context
-        })
+          role    : updatedHistory.at(-1)?.role || 'user',
+          context,
+          // allow one-turn context push for Quick Actions
+          forceContext : Boolean(opts.forceContext),
+          // NEW: persist the visible “Sure! …” assistant line into the thread history
+          assistantNote: opts.assistantNote || null
+        }),
+        // Quick Actions can take ~10s while milestones + tasks are created
+        timeoutMs: 30000,
+        retryNonIdempotent: true
       });

-      let reply = 'Sorry, something went wrong.';
-      if (r.ok && (r.headers.get('content-type') || '').includes('application/json')) {
-        const data = await r.json();
-        reply = (data?.reply || '').trim() || reply;
-      }
-      setMessages(prev => [...prev, { role: 'assistant', content: reply }]);
-    } catch (e) {
-      console.error(e);
-      setMessages(prev => [...prev, { role: 'assistant', content: 'Sorry, something went wrong.' }]);
+      if (r && r.status >= 200 && r.status < 300) {
+        // 1) Try to read counts and reply from the JSON (if present)
+        try {
+          const ct = (r.headers.get('content-type') || '').toLowerCase();
+          if (ct.includes('application/json')) {
+            const data = await r.json();
+            const created = Array.isArray(data?.createdMilestones) ?
+              data.createdMilestones.length : 0;

+            if (created > 0 && typeof onMilestonesCreated === 'function')
+              onMilestonesCreated(created);
+
+            replyText = (data?.reply || '').trim(); // may be empty if the model only emitted JSON
+
+            if (created > 0) {
+              try { window.dispatchEvent(new CustomEvent('aptiva:milestones:changed', { detail:{ scenarioId:
+                scenarioRow?.id, count: created } }));
+              }
+              catch {}
+            }
+          }
+        } catch {/* fine; we’ll pull from thread */}
+
+        // 2) Only fall back to thread read if we didn't get a reply in JSON
+        if (!replyText) {
+          try {
+            const th = await authFetch(`/api/premium/coach/chat/threads/${id}`, { timeoutMs: 30000 });
+            if (th.ok && (th.headers.get('content-type') || '').includes('application/json')) {
+              const { messages: msgs = [] } = await th.json();
+              const visible = (Array.isArray(msgs) ? msgs : []).filter(m => !isHiddenPrompt(m));
+              const lastAssistant = [...visible].reverse().find(m => m.role === 'assistant');
+              if (lastAssistant?.content) replyText = String(lastAssistant.content);
+            }
+          } catch {/* ignore */}
+        }
+      } else {
+        // 🔁 Salvage: the backend may have saved the reply or created milestones before erroring
+        try {
+          const th = await authFetch(`/api/premium/coach/chat/threads/${id}`, { timeoutMs: 30000 });
+          if (th.ok && (th.headers.get('content-type') || '').includes('application/json')) {
+            const { messages: msgs = [] } = await th.json();
+            const visible = (Array.isArray(msgs) ? msgs : []).filter(m => !isHiddenPrompt(m));
+            const lastAssistant = [...visible].reverse().find(m => m.role === 'assistant');
+            if (lastAssistant?.content) replyText = String(lastAssistant.content);
+          }
+        } catch {}
+        if (!replyText) {
+          setMessages(prev => [...prev, { role: 'assistant', content: 'Sorry, something went wrong.' }]);
+        }
+      }
+    } catch (e) {
+      if (e?.name !== 'AbortError') { console.error(e);
+        setMessages(prev => [...prev, { role: 'assistant', content: 'Sorry, something went wrong.' }]);
+      }
     } finally {
       setLoading(false);
+      if (replyText) {
+        setMessages(prev => [...prev, { role: 'assistant', content: replyText }]);
+      }
     }
   }
+  }

 /* ------------ normal send ------------- */
 function handleSubmit(e) {
@@ -386,11 +437,10 @@ I'm here to support you with personalized coaching. What would you like to focus
     /* 1) Mock-Interview (special flow) */
     if (type === "interview") {
       const desc = scenarioRow?.job_description || "";
-      const hiddenSystem = { role:"system", content: buildInterviewPrompt(careerName, desc) };
-      const note = { role:"assistant", content:`Starting mock interview on **${careerName}**. Answer each question and I'll give feedback!` };
-      const updated = [...messages, note, hiddenSystem];
-      setMessages([...messages, note]);
-      callAi(updated);
+      const hiddenSystem = { role:"system", content: buildOneShotInterviewPrompt(careerName, desc) };
+      // No visible preface; just ask the question.
+      const updated = [...messages, hiddenSystem];
+      callAi(updated, { forceContext: true });
       return;
     }

@@ -414,7 +464,10 @@ I'm here to support you with personalized coaching. What would you like to focus
     const updated = [...messages, note, hiddenSystem];
     setMessages([...messages, note]);
     const needsContext = ["networking", "jobSearch", "aiGrowth"].includes(type);
-    callAi(updated, {forceContext: needsContext});
+    callAi(updated, {
+      forceContext: needsContext,
+      assistantNote: note.content
+    });
   }

@@ -576,24 +576,6 @@ useEffect(() => {
     return () => clearTimeout(timer);
   }, [buttonDisabled]);

-  /* ------------------------------------------------------------------
-   * 1) Restore AI recommendations (unchanged behaviour)
-   * -----------------------------------------------------------------*/
-  useEffect(() => {
-    const json = localStorage.getItem('aiRecommendations');
-    if (!json) return;
-
-    try {
-      const arr = JSON.parse(json).map((m) => ({
-        ...m,
-        id: m.id || crypto.randomUUID()
-      }));
-      setRecommendations(arr);
-    } catch (err) {
-      console.error('Error parsing stored AI recs', err);
-    }
-  }, []);
-
   /* ------------------------------------------------------------------
    * 2) Whenever the careerProfileId changes, clear the modal check flag
    * -----------------------------------------------------------------*/
@@ -1370,6 +1352,18 @@ const handleMilestonesCreated = useCallback(
     [fetchMilestones]
   );

+  // Refresh milestones when the chat layer announces changes
+  useEffect(() => {
+    const handler = (e) => {
+      // If an event specifies a scenarioId, ignore other scenarios
+      const incoming = e?.detail?.scenarioId;
+      if (incoming && String(incoming) !== String(careerProfileId)) return;
+      fetchMilestones();
+    };
+    window.addEventListener('aptiva:milestones:changed', handler);
+    return () => window.removeEventListener('aptiva:milestones:changed', handler);
+  }, [careerProfileId, fetchMilestones]);
+
   return (
     <div className="milestone-tracker max-w-screen-lg mx-auto px-4 py-6 space-y-4">
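The refresh path relies on a DOM `CustomEvent` handshake between the chat layer and the milestone tracker. A sketch of both sides, with an invented scenario id and `console.log` standing in for `fetchMilestones()`:

```js
// Sketch of the event handshake added above; the scenario id is invented.
const currentScenarioId = 42;

// Tracker side: refetch only when the event targets the scenario on screen.
window.addEventListener('aptiva:milestones:changed', (e) => {
  const incoming = e?.detail?.scenarioId;
  if (incoming && String(incoming) !== String(currentScenarioId)) return;
  console.log(`refresh milestones for scenario ${currentScenarioId}`);
});

// Chat side: announce that milestones changed for a scenario.
window.dispatchEvent(
  new CustomEvent('aptiva:milestones:changed', {
    detail: { scenarioId: 42, count: 3 },
  })
);
```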
@@ -58,6 +58,8 @@ export default function ChatDrawer({
   const [prompt, setPrompt] = useState('');
   const [messages, setMessages] = useState([]); // { role, content }
   const listRef = useRef(null);
+  const SUPPORT_INTRO =
+    "Hi — Aptiva Support here. I can help with CareerExplorer, account/billing, or technical issues. What do you need?";

   /* auto-scroll on incoming messages */
   useEffect(() => {
@@ -74,14 +76,16 @@ export default function ChatDrawer({
         const r = await fetch(`/api/chat/threads/${id}`, { credentials: 'include' });
         if (r.ok) {
           const { messages: msgs } = await r.json();
-          setMessages(msgs || []);
+          setMessages((Array.isArray(msgs) && msgs.length)
+            ? msgs
+            : [{ role: 'assistant', content: SUPPORT_INTRO }]);
         } else {
           // don’t crash UI on preload failure
-          setMessages([]);
+          setMessages([{ role: 'assistant', content: SUPPORT_INTRO }]);
         }
       } catch (e) {
         console.error('[Support preload]', e);
-        setMessages([]);
+        setMessages([{ role: 'assistant', content: SUPPORT_INTRO }]);
       }
     })();
   }, []);
@@ -232,6 +236,10 @@ export default function ChatDrawer({
       </div>

       <div className="border-t p-4">
+        {/* Persistent disclaimer */}
+        <div className="text-xs text-gray-600 mb-2">
+          ⚠ Aptiva bots may be incomplete or inaccurate. Verify important details before acting.
+        </div>
         <form
           onSubmit={(e) => {
             e.preventDefault();