Added AI agent to Retirement
parent 4af7300117
commit fe2ec2d3c1
@@ -517,7 +517,7 @@ Return ONLY a JSON array, no extra text:
   // 5) Call OpenAI (ignoring scenarioRow.start_date for date logic)
   const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
   const completion = await openai.chat.completions.create({
-    model: 'gpt-3.5-turbo', // or 'gpt-4'
+    model: 'gpt-4o-mini', // or 'gpt-4'
     messages,
     temperature: 0.7,
     max_tokens: 600
@@ -1140,7 +1140,7 @@ messagesToSend.push(...chatHistory.slice(-MAX_CHAT_TURNS));
   // ------------------------------------------------
   const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
   const completion = await openai.chat.completions.create({
-    model: "gpt-3.5-turbo-0125",
+    model: "gpt-4o-mini",
     messages: messagesToSend,
     temperature: 0.3,
     max_tokens: 1000
@@ -1335,6 +1335,159 @@ Check your Milestones tab. Let me know if you want any changes!
     }
   });

+/* ──────────────────────────────────────────────
+   RETIREMENT AI-CHAT ENDPOINT
+   ─────────────────────────────────────────── */
+app.post(
+  '/api/premium/retirement/aichat',
+  authenticatePremiumUser,
+  async (req, res) => {
+    try {
+      /* 0) ── pull + sanity-check inputs ─────────────── */
+      const {
+        prompt = '',
+        scenario_id = '',
+        chatHistory = []
+      } = req.body || {};
+
+      if (!prompt.trim()) return res.status(400).json({ error: 'Prompt is required.' });
+      if (!scenario_id) return res.status(400).json({ error: 'scenario_id is required.' });
+
+      /* 1) ── ownership guard ────────────────────────── */
+      const [[scenario]] = await pool.query(
+        'SELECT * FROM career_profiles WHERE id = ? AND user_id = ?',
+        [scenario_id, req.id]
+      );
+      if (!scenario) return res.status(404).json({ error: 'Scenario not found.' });
+
+      /* 2) ── locate the *text* of the last user turn ─ */
+      let userMsgStr = prompt.trim();
+      if (Array.isArray(chatHistory)) {
+        for (let i = chatHistory.length - 1; i >= 0; i--) {
+          const m = chatHistory[i];
+          if (m?.role === 'user' && typeof m.content === 'string') {
+            userMsgStr = m.content;
+            break;
+          }
+        }
+      }
+
+      /* helper: force every .content to be a plain string */
+      const toStr = v =>
+        v === null || v === undefined
+          ? ''
+          : typeof v === 'string'
+            ? v
+            : JSON.stringify(v);
+
+      const sanitizedHistory = (Array.isArray(chatHistory) ? chatHistory : [])
+        .map(({ role = 'user', content = '' }) => ({ role, content: toStr(content) }));
+
+      /* 3) ── system instructions ────────────────────── */
+      const systemMsg = `
+You are AptivaAI's retirement-planning coach.
+Rules:
+• Educational guidance only — **NO** personalised investment advice.
+• Never recommend specific securities or products.
+• Friendly tone; ≤ 180 words.
+• If the scenario needs updating, append a JSON block:
+\`\`\`json
+{ "retirement_start_date": "2045-01-01" }
+\`\`\`
+If nothing changes, just return “{"noop":true}”.
+• Always end with: “AptivaAI is an educational tool – not advice.”
+`.trim();
+
+      /* 4) ── call OpenAI ─────────────────────────────── */
+      const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
+      const chatRes = await openai.chat.completions.create({
+        model       : 'gpt-4o-mini',
+        temperature : 0.6,
+        max_tokens  : 600,
+        messages    : [
+          { role: 'system', content: systemMsg },
+          ...sanitizedHistory,
+          { role: 'user', content: userMsgStr }
+        ]
+      });
+
+      const raw = (chatRes.choices?.[0]?.message?.content || '').trim();
+      res.set({
+        'X-OpenAI-Prompt-Tokens'    : chatRes.usage?.prompt_tokens ?? 0,
+        'X-OpenAI-Completion-Tokens': chatRes.usage?.completion_tokens ?? 0
+      });
+
+      /* 5) ── extract (or ignore) JSON patch ──────────── */
+      let visibleReply = raw;
+      let scenarioPatch = null;
+
+      // A. fenced ```json```? ───────────────────────────
+      let match = raw.match(/```json\s*([\s\S]+?)```/i);
+
+      // B. or a “loose” top-level { … }? (no fences) ─────
+      if (!match) {
+        const open = raw.search(/[{\[]/);
+        if (open !== -1) {
+          const close = Math.max(raw.lastIndexOf('}'), raw.lastIndexOf(']'));
+          if (close > open) match = [ , raw.slice(open, close + 1) ];
+        }
+      }
+
+      if (match) {
+        try { scenarioPatch = JSON.parse(match[1]); } catch {/* ignore bad JSON */}
+        visibleReply = raw.replace(match[0] || match[1], '').trim();
+      }
+
+      /* ignore {"noop":true} or empty objects */
+      if (
+        !scenarioPatch ||
+        !Object.keys(scenarioPatch)
+          .filter(k => k !== 'noop')
+          .length
+      ) {
+        scenarioPatch = null;
+      }
+
+      /* 6) ── persist real changes ───────────────────── */
+      if (scenarioPatch) {
+        const fields = Object.keys(scenarioPatch);
+        const setters = fields.map(f => `${f} = ?`).join(', ');
+        const values = fields.map(f => scenarioPatch[f]);
+
+        await pool.query(
+          `UPDATE career_profiles
+              SET ${setters},
+                  updated_at = CURRENT_TIMESTAMP
+            WHERE id = ? AND user_id = ?`,
+          [...values, scenario_id, req.id]
+        );
+      }
+
+      /* if the patch included a new retirement_start_date, sync the milestone */
+      if (scenarioPatch?.retirement_start_date) {
+        await pool.query(
+          `UPDATE milestones
+              SET date = ?,
+                  updated_at = CURRENT_TIMESTAMP
+            WHERE career_profile_id = ?
+              AND user_id = ?
+              AND LOWER(title) LIKE 'retirement%'`,
+          [scenarioPatch.retirement_start_date, scenario_id, req.id]
+        );
+        // (optional) if no milestone matched, you could INSERT one here instead
+      }
+
+      /* 7) ── send to client ─────────────────────────── */
+      return res.json({
+        reply: visibleReply || 'Sorry, no response – please try again.',
+        ...(scenarioPatch ? { scenarioPatch } : {})
+      });
+    } catch (err) {
+      console.error('retirement/aichat error:', err);
+      return res.status(500).json({ error: 'Internal error – please try again later.' });
+    }
+  }
+);
+
 /***************************************************
    AI MILESTONE CONVERSION ENDPOINT
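As a quick orientation for reviewers, here is a minimal sketch of how a front end might call the endpoint added above. The route, request body fields (prompt, scenario_id, chatHistory), response shape (reply plus optional scenarioPatch), and the token-usage headers all come from the diff; the base URL and the bearer-token header are assumptions, since the auth behind authenticatePremiumUser is not shown in this commit.

```js
// Hypothetical client-side helper for the new retirement chat endpoint.
// BASE_URL and the Authorization scheme are assumptions for illustration only.
const BASE_URL = 'https://app.example.com';

async function askRetirementCoach(token, scenarioId, prompt, chatHistory = []) {
  const res = await fetch(`${BASE_URL}/api/premium/retirement/aichat`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${token}`              // assumed auth scheme
    },
    body: JSON.stringify({ prompt, scenario_id: scenarioId, chatHistory })
  });
  if (!res.ok) throw new Error(`aichat failed: ${res.status}`);

  // The endpoint reports OpenAI token usage via response headers.
  const promptTokens = Number(res.headers.get('X-OpenAI-Prompt-Tokens') ?? 0);

  // scenarioPatch is only present when the model proposed a real change.
  const { reply, scenarioPatch } = await res.json();
  return { reply, scenarioPatch, promptTokens };
}
```

When scenarioPatch comes back, the server has already applied it to career_profiles (and, for retirement_start_date, to the matching milestone), so the client only needs to refresh its local state.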
@@ -1488,7 +1641,7 @@ app.post('/api/premium/ai-risk-analysis', authenticatePremiumUser, async (req, r
     });
   }

-  // 2) If missing, call GPT-3.5 to generate analysis
+  // 2) If missing, call GPT-4 to generate analysis
   const prompt = `
     The user has a career named: ${careerName}
     Description: ${jobDescription}
@@ -1504,7 +1657,7 @@ app.post('/api/premium/ai-risk-analysis', authenticatePremiumUser, async (req, r
   `;

   const completion = await openai.chat.completions.create({
-    model: 'gpt-3.5-turbo',
+    model: "gpt-4o-mini",
     messages: [{ role: 'user', content: prompt }],
     temperature: 0.3,
     max_tokens: 200,
@@ -1574,7 +1727,7 @@ app.post('/api/public/ai-risk-analysis', async (req, res) => {

   const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
   const completion = await openai.chat.completions.create({
-    model: 'gpt-3.5-turbo',
+    model: 'gpt-4o-mini',
     messages: [{ role: 'user', content: prompt }],
     temperature: 0.3,
     max_tokens: 200,
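For completeness, a standalone sketch of the scenario-patch convention the retirement endpoint above relies on: the assistant appends a fenced JSON block (or a bare top-level object), which the server parses into scenarioPatch and strips from the text shown to the user. The sample reply string is invented for demonstration; the parsing mirrors step 5 of the new endpoint.

```js
// Demonstration of the JSON-patch extraction used by the retirement endpoint.
// The sample reply is made up; the logic mirrors step 5 in the diff above.
const FENCE = '`'.repeat(3);                 // builds "```" so this snippet stays fence-safe
const raw = [
  'Done - your retirement target is now 2045.',
  'AptivaAI is an educational tool – not advice.',
  FENCE + 'json',
  '{ "retirement_start_date": "2045-01-01" }',
  FENCE
].join('\n');

let visibleReply = raw;
let scenarioPatch = null;

// A. fenced json block; B. loose top-level object as a fallback
let match = raw.match(new RegExp(FENCE + 'json\\s*([\\s\\S]+?)' + FENCE, 'i'));
if (!match) {
  const open = raw.search(/[{\[]/);
  if (open !== -1) {
    const close = Math.max(raw.lastIndexOf('}'), raw.lastIndexOf(']'));
    if (close > open) match = [ , raw.slice(open, close + 1) ];
  }
}

if (match) {
  try { scenarioPatch = JSON.parse(match[1]); } catch { /* ignore bad JSON */ }
  visibleReply = raw.replace(match[0] || match[1], '').trim();
}

console.log(visibleReply);   // chat text only: the JSON block has been stripped
console.log(scenarioPatch);  // { retirement_start_date: '2045-01-01' }
```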