
AI-Powered Marketing Automation

This document specifies AI/ML and LLM capabilities for automated marketing and customer engagement in the CRM platform.


Executive Summary

Extend the CRM's marketing automation with AI capabilities:

CapabilityTechnologyValue
Content GenerationLLM (Claude/GPT)Auto-generate personalized messages
Send Time OptimizationMLPredict best time to reach each customer
Natural Language SegmentsLLM"Find golfers who..." → segment rules
Journey OptimizationML + LLMAuto-tune journeys based on performance
Personalization EngineMLDynamic content per customer
Churn InterventionsML + LLMRecommended actions for at-risk customers

1. LLM Content Generation

1.1 Use Cases

Use CaseInputOutput
Email subject linesCampaign context, audience5 subject line variants
Email bodyTemplate, customer dataPersonalized email HTML
SMS messagesCampaign goal, char limitConcise SMS text
Social postsEvent details, platformPlatform-optimized post
Push notificationsAction goalShort, compelling copy

1.2 Content Generation Service

// crm/services/src/ai/content-generation.service.ts

/**
 * Input contract for LLM-backed copy generation.
 * Describes the campaign, target audience, output constraints and brand
 * guidelines that get compiled into the system/user prompts.
 */
interface ContentGenerationRequest {
// Output channel/format; selects the per-type prompt instructions.
type: 'EMAIL_SUBJECT' | 'EMAIL_BODY' | 'SMS' | 'SOCIAL_POST' | 'PUSH';

// Context
campaignGoal: string; // "Drive tournament registrations"
audience: AudienceContext; // Segment characteristics
tone: 'FORMAL' | 'FRIENDLY' | 'URGENT' | 'PLAYFUL';

// Constraints
maxLength?: number; // Character limit
includeEmoji?: boolean;
callToAction?: string; // Required CTA

// Personalization tokens available
// Tokens are emitted in {{token}} form, e.g. {{firstName}}.
availableTokens: string[]; // ['firstName', 'membershipTier', 'lastBookingDate']

// Brand guidelines
brandVoice?: string; // "Professional but approachable"
avoidWords?: string[]; // Words to exclude
}

/**
 * Aggregate description of the audience segment used to ground the copy
 * (size, engagement, golf-specific traits). All characteristics except
 * engagementLevel are optional aggregates.
 */
interface AudienceContext {
segmentName: string;
segmentSize: number;
characteristics: {
avgAge?: number;
avgHandicap?: number;
primaryMembershipTier?: string;
topActivities?: string[];
engagementLevel: 'HIGH' | 'MEDIUM' | 'LOW';
};
}

@Injectable()
export class ContentGenerationService {
constructor(
private readonly llmClient: LLMClient,
// NOTE(review): templateService is not referenced by any method visible
// in this class — confirm it is needed or remove the dependency.
private readonly templateService: TemplateService,
) {}

/**
* Generate content variants for A/B testing.
*
* Compiles the request into system/user prompts and asks the LLM for
* `variantCount` alternatives in a single call. Uses the cheaper Haiku
* model with a high temperature so variants differ in approach.
*/
async generateVariants(
request: ContentGenerationRequest,
variantCount: number = 3
): Promise<ContentVariant[]> {
const systemPrompt = this.buildSystemPrompt(request);
const userPrompt = this.buildUserPrompt(request, variantCount);

const response = await this.llmClient.generate({
model: 'claude-3-haiku', // Fast, cost-effective for content
systemPrompt,
userPrompt,
temperature: 0.8, // Higher creativity for variants
maxTokens: 1000,
});

// parseVariants must handle non-JSON model output; not shown here.
return this.parseVariants(response, request.type);
}

/**
* Personalize content for a specific customer.
*
* Two stages: deterministic {{token}} substitution first, then an
* optional LLM rewrite pass when context.llmPersonalization is set.
*/
async personalizeForCustomer(
templateContent: string,
customer: CustomerProfile,
context: PersonalizationContext
): Promise<string> {
// First: Token replacement (fast, deterministic)
let content = this.replaceTokens(templateContent, customer);

// Second: LLM personalization (if enabled)
if (context.llmPersonalization) {
content = await this.llmPersonalize(content, customer, context);
}

return content;
}

// Builds the system prompt: brand voice, formatting rules, allowed
// personalization tokens, and the required JSON output format.
private buildSystemPrompt(request: ContentGenerationRequest): string {
return `You are a marketing copywriter for a golf club management platform.

BRAND VOICE: ${request.brandVoice || 'Professional, warm, and golf-enthusiast friendly'}

RULES:
- Never use placeholder text like [Name] - use the exact token format: {{firstName}}
- Keep copy concise and action-oriented
- ${request.includeEmoji ? 'Use relevant emojis sparingly' : 'Do not use emojis'}
- Avoid: ${request.avoidWords?.join(', ') || 'spam trigger words, ALL CAPS'}
- Available personalization tokens: ${request.availableTokens.join(', ')}

OUTPUT FORMAT: Return valid JSON array of variants.`;
}

// Builds the user prompt: campaign goal, audience summary, tone, CTA,
// plus per-type instructions (length caps, variant count, etc.).
private buildUserPrompt(
request: ContentGenerationRequest,
variantCount: number
): string {
const typeInstructions = {
EMAIL_SUBJECT: `Generate ${variantCount} email subject lines. Max 60 characters each. Make them compelling and varied in approach (curiosity, urgency, benefit-focused, etc.)`,
EMAIL_BODY: `Generate email body copy. Use HTML formatting. Include a clear CTA button.`,
SMS: `Generate ${variantCount} SMS messages. Max ${request.maxLength || 160} characters. Include CTA.`,
SOCIAL_POST: `Generate ${variantCount} social media posts. Platform-appropriate length and hashtags.`,
PUSH: `Generate ${variantCount} push notification texts. Max 50 characters. Urgent and actionable.`,
};

return `
CAMPAIGN GOAL: ${request.campaignGoal}

AUDIENCE:
- Segment: ${request.audience.segmentName} (${request.audience.segmentSize} members)
- Engagement: ${request.audience.characteristics.engagementLevel}
- Primary tier: ${request.audience.characteristics.primaryMembershipTier}
${request.audience.characteristics.avgHandicap ? `- Avg handicap: ${request.audience.characteristics.avgHandicap}` : ''}

TONE: ${request.tone}
${request.callToAction ? `REQUIRED CTA: ${request.callToAction}` : ''}

TASK: ${typeInstructions[request.type]}

Return as JSON: { "variants": [{ "content": "...", "approach": "..." }] }
`;
}
}

1.3 Content Variants for A/B Testing

/**
 * One generated copy variant, tagged with the persuasion approach the
 * model used so A/B results can be attributed to a strategy.
 */
interface ContentVariant {
id: string;
content: string;
approach: string; // "urgency", "curiosity", "social-proof"
// Optional ML-predicted engagement, usable for ranking before any sends.
estimatedPerformance?: {
predictedOpenRate: number;
predictedClickRate: number;
confidence: number; // 0-1
};
}

// Integration with SPLIT step

/**
 * Generate subject-line variants for an A/B SPLIT step.
 *
 * Asks the content service for 3 subject lines and wraps each in a split
 * variant named A, B, C... with near-equal traffic weights.
 *
 * @param journeyId - journey whose audience grounds the generation
 * @param stepConfig - the SEND step config to clone per variant
 * @returns SPLIT step config with auto-winner selection on open rate
 */
async function generateSplitVariants(
  journeyId: string,
  stepConfig: SendStepConfig
): Promise<SplitStepConfig> {
  const variants = await contentService.generateVariants(
    {
      type: 'EMAIL_SUBJECT',
      campaignGoal: stepConfig.goal,
      audience: await getJourneyAudience(journeyId),
      tone: 'FRIENDLY',
    },
    3
  );

  // Distribute traffic as evenly as possible while guaranteeing the
  // weights sum to exactly 100. A plain Math.floor(100 / n) loses the
  // remainder: 3 variants would get 33 + 33 + 33 = 99. The first
  // `remainder` variants each absorb one extra point.
  const baseWeight = Math.floor(100 / variants.length);
  const remainder = 100 - baseWeight * variants.length;

  return {
    variants: variants.map((v, i) => ({
      name: `Variant ${String.fromCharCode(65 + i)}`, // A, B, C
      weight: baseWeight + (i < remainder ? 1 : 0),
      config: {
        ...stepConfig,
        subject: v.content,
        subjectApproach: v.approach,
      },
    })),
    trackingMetric: 'OPEN_RATE',
    autoSelectWinner: true,
    winnerThreshold: 0.95, // 95% confidence
  };
}

2. Send Time Optimization (STO)

2.1 Concept

Predict the optimal time to send messages to each customer based on their historical engagement patterns.

┌─────────────────────────────────────────────────────────────────┐
│ SEND TIME OPTIMIZATION │
├─────────────────────────────────────────────────────────────────┤
│ │
│ Customer A: Opens emails at 7am (before golf) │
│ Customer B: Opens emails at 8pm (evening browser) │
│ Customer C: Opens emails at lunch (12-1pm) │
│ │
│ Campaign scheduled for "Morning" │
│ ──────────────────────────────── │
│ Customer A: Send at 6:45am │
│ Customer B: Hold until 7:45pm │
│ Customer C: Send at 11:45am │
│ │
└─────────────────────────────────────────────────────────────────┘

2.2 Send Time Model

// crm/services/src/ai/send-time-optimization.service.ts

/**
 * Per-customer model of when they are most likely to engage with
 * outbound messages, built from their email interaction history
 * (see SendTimeOptimizationService.buildSendTimeProfile).
 */
interface CustomerSendTimeProfile {
customerId: string;

// Predicted optimal windows (ranked)
optimalWindows: TimeWindow[];

// Engagement patterns
patterns: {
bestDayOfWeek: number; // 0-6
bestHourOfDay: number; // 0-23
timezone: string;

// Detailed hourly probabilities
hourlyOpenProbability: number[]; // 24 values
dailyOpenProbability: number[]; // 7 values
};

// Model confidence
// Derived from sample size; values below 0.3 trigger segment fallback.
confidence: number; // 0-1
sampleSize: number; // Emails used for training
lastUpdated: Date;
}

/**
 * A candidate sending window; lower rank is better. expectedOpenRate is
 * presumably the open rate observed in this window historically — the
 * derivation lives in findOptimalWindows (not shown).
 */
interface TimeWindow {
dayOfWeek: number | 'ANY';
startHour: number;
endHour: number;
expectedOpenRate: number;
rank: number;
}

@Injectable()
export class SendTimeOptimizationService {
/**
* Get optimal send time for a customer.
*
* Falls back to the segment/global average when the per-customer model
* has too little data (confidence < 0.3).
*/
async getOptimalSendTime(
customerId: string,
constraints: SendTimeConstraints
): Promise<Date> {
const profile = await this.getSendTimeProfile(customerId);

if (profile.confidence < 0.3) {
// Not enough data - use segment/global average
return this.getSegmentOptimalTime(customerId, constraints);
}

return this.calculateOptimalTime(profile, constraints);
}

/**
* Build send time profile from engagement history.
*
* Aggregates the last 6 months of SENT/OPENED/CLICKED interactions into
* hourly and daily open-rate stats, then derives ranked windows.
* Confidence scales with the number of interactions observed.
*/
async buildSendTimeProfile(customerId: string): Promise<CustomerSendTimeProfile> {
// Get last 6 months of email interactions
const interactions = await this.prisma.campaignInteraction.findMany({
where: {
customerId,
type: { in: ['SENT', 'OPENED', 'CLICKED'] },
occurredAt: { gte: this.sixMonthsAgo() },
},
orderBy: { occurredAt: 'asc' },
});

// Group by sent time, calculate open rates
const hourlyStats = this.calculateHourlyStats(interactions);
const dailyStats = this.calculateDailyStats(interactions);

// Find optimal windows
const optimalWindows = this.findOptimalWindows(hourlyStats, dailyStats);

return {
customerId,
optimalWindows,
patterns: {
bestDayOfWeek: this.findBestDay(dailyStats),
bestHourOfDay: this.findBestHour(hourlyStats),
timezone: await this.inferTimezone(customerId),
hourlyOpenProbability: hourlyStats.map(h => h.openRate),
dailyOpenProbability: dailyStats.map(d => d.openRate),
},
confidence: this.calculateConfidence(interactions.length),
sampleSize: interactions.length,
lastUpdated: new Date(),
};
}

/**
* Batch schedule campaign with personalized send times.
*
* Resolves an optimal time per customer, buckets customers into 15-min
* slots, and enqueues one delayed batch job per slot.
*
* NOTE(review): optimal times are resolved with one sequential await per
* customer — an N+1 pattern that will be slow for large audiences;
* consider chunked Promise.all.
*/
async scheduleWithOptimization(
campaignId: string,
customerIds: string[],
constraints: SendTimeConstraints
): Promise<ScheduleResult> {
const schedules: CustomerSchedule[] = [];

for (const customerId of customerIds) {
const optimalTime = await this.getOptimalSendTime(customerId, constraints);
schedules.push({ customerId, scheduledAt: optimalTime });
}

// Group by time slot for efficient batch sending
const batches = this.groupByTimeSlot(schedules, 15); // 15-min slots

// Queue batches
for (const batch of batches) {
await this.messageQueue.add('send-batch', {
campaignId,
customerIds: batch.customerIds,
}, {
// NOTE(review): delay is negative if the slot is already in the
// past — presumably the queue then runs immediately; confirm.
delay: batch.scheduledAt.getTime() - Date.now(),
});
}

return {
totalScheduled: schedules.length,
timeSpread: this.calculateTimeSpread(schedules),
batches: batches.length,
};
}
}

/**
 * Caller-supplied constraints bounding when a message may be sent,
 * applied on top of the per-customer optimal-time prediction.
 */
interface SendTimeConstraints {
// Allowed sending window
earliestHour: number; // e.g., 8 (8am)
latestHour: number; // e.g., 20 (8pm)
allowedDays: number[]; // [1,2,3,4,5] = weekdays

// Campaign constraints
mustSendWithin: number; // Hours from now
respectQuietHours: boolean; // No sends during sleep hours

// Timezone handling
useCustomerTimezone: boolean;
fallbackTimezone: string;
}

2.3 Send Time Features

/**
 * Feature vector for the send-time model. Most fields are behavioral
 * aggregates; the "external signals" at the bottom are inferred labels.
 */
interface SendTimeFeatures {
// Historical engagement
avgOpenHour: number;
avgClickHour: number;
stdDevOpenHour: number;

// Behavioral patterns
isWeekendEngager: boolean;
isMorningPerson: boolean;
isEveningPerson: boolean;

// Activity correlation
typicalBookingHour: number; // When they book tee times
typicalActivityHour: number; // When they're active in app

// External signals
inferredWorkSchedule: 'STANDARD' | 'SHIFT' | 'FLEXIBLE' | 'RETIRED';
golfRoundPreference: 'EARLY_MORNING' | 'MIDDAY' | 'AFTERNOON' | 'TWILIGHT';
}

3. Natural Language Segment Builder

3.1 Concept

Allow marketers to describe segments in plain English, and have LLM convert to segment rules.

┌─────────────────────────────────────────────────────────────────┐
│ USER INPUT │
│ "Find members who played at least twice last month but │
│ haven't opened any emails in the past 2 weeks" │
│ │
│ ▼ LLM │
│ │
│ GENERATED RULES │
│ { │
│ "combinator": "AND", │
│ "rules": [ │
│ { "field": "bookingCount30d", "operator": "gte", "value": 2 },
│ { "field": "lastEmailOpenAt", "operator": "lt", │
│ "value": { "relative": "-14 days" } } │
│ ] │
│ } │
└─────────────────────────────────────────────────────────────────┘

3.2 NL Segment Service

// crm/services/src/ai/nl-segment.service.ts

/**
 * A plain-English segment description plus tenant context, to be
 * converted into structured segment criteria by the LLM.
 */
interface NLSegmentRequest {
naturalLanguageQuery: string;
tenantId: string;

// Context for better understanding
existingSegments?: string[]; // Available segment names for reference
recentCampaigns?: string[]; // Recent campaign names
}

/**
 * Result of NL-to-rules conversion: the generated criteria, how the
 * model interpreted the query (plus explicit assumptions), and a size
 * estimate with validation warnings so the marketer can sanity-check.
 */
interface NLSegmentResponse {
// Generated segment criteria
criteria: SegmentCriteria;

// Explanation
interpretation: string; // How the LLM understood the query
assumptions: string[]; // Assumptions made

// Validation
estimatedSize: number; // Approximate segment size
warnings: string[]; // Potential issues

// Alternatives
suggestions?: string[]; // Alternative interpretations
}

@Injectable()
export class NLSegmentService {
constructor(
private readonly llmClient: LLMClient,
// NOTE(review): segmentService is injected but not used by any method
// visible here — presumably used by validateCriteria; confirm.
private readonly segmentService: CrmSegmentService,
) {}

/**
* Convert natural language to segment criteria.
*
* Grounds the LLM with the field schema, parses its JSON answer, then
* validates the criteria against the tenant's data to produce a size
* estimate and warnings.
*/
async parseNaturalLanguage(
request: NLSegmentRequest
): Promise<NLSegmentResponse> {
const schemaContext = await this.getSchemaContext();

const response = await this.llmClient.generate({
model: 'claude-3-5-sonnet', // Better reasoning for complex queries
systemPrompt: this.buildSystemPrompt(schemaContext),
userPrompt: request.naturalLanguageQuery,
temperature: 0.2, // Lower temperature for accuracy
});

// parseResponse is expected to handle malformed model output; not shown.
const parsed = this.parseResponse(response);

// Validate and estimate size
const validation = await this.validateCriteria(
request.tenantId,
parsed.criteria
);

return {
...parsed,
estimatedSize: validation.estimatedSize,
warnings: validation.warnings,
};
}

// Builds the grounding prompt: field schema, operator vocabulary,
// relative-date convention, output JSON shape, and disambiguation rules.
private buildSystemPrompt(schema: SchemaContext): string {
return `You are a segment rule generator for a golf club CRM.

AVAILABLE FIELDS:
${schema.fields.map(f => `- ${f.name} (${f.type}): ${f.description}`).join('\n')}

AVAILABLE OPERATORS:
- eq: equals
- neq: not equals
- gt, gte, lt, lte: numeric comparisons
- contains: string/array contains
- isNull, isNotNull: null checks

RELATIVE DATES:
Use { "relative": "-7 days" } for date comparisons relative to now.

OUTPUT FORMAT:
{
"criteria": {
"combinator": "AND" | "OR",
"rules": [
{ "field": "...", "operator": "...", "value": ... }
]
},
"interpretation": "How I understood the query",
"assumptions": ["List of assumptions made"]
}

RULES:
- Use exact field names from the schema
- Default to AND combinator unless OR is clearly implied
- For ambiguous terms, make reasonable assumptions and list them
- "active" typically means status = 'ACTIVE'
- "recent" typically means last 30 days
- "high value" typically means top 20% by LTV or engagement`;
}

/**
* Get available fields with descriptions.
*
* Currently a hard-coded list; the async signature suggests it is meant
* to become schema-driven later.
*/
private async getSchemaContext(): Promise<SchemaContext> {
return {
fields: [
{ name: 'status', type: 'enum', description: 'ACTIVE, INACTIVE, MERGED, DELETED' },
{ name: 'lifecycleStage', type: 'enum', description: 'SUBSCRIBER, LEAD, CUSTOMER, ADVOCATE' },
{ name: 'membershipTier', type: 'string', description: 'Membership level (Bronze, Silver, Gold, etc.)' },
{ name: 'membershipStatus', type: 'string', description: 'ACTIVE, EXPIRED, CANCELLED' },
{ name: 'engagementScore', type: 'number', description: '0-100, higher is more engaged' },
{ name: 'churnRiskScore', type: 'number', description: '0-100, higher is more likely to churn' },
{ name: 'bookingCount30d', type: 'number', description: 'Tee time bookings in last 30 days' },
{ name: 'bookingCount90d', type: 'number', description: 'Tee time bookings in last 90 days' },
{ name: 'activityCount30d', type: 'number', description: 'Any activities in last 30 days' },
{ name: 'lastActivityAt', type: 'date', description: 'Last activity timestamp' },
{ name: 'lastBookingAt', type: 'date', description: 'Last tee time booking' },
{ name: 'lastEmailOpenAt', type: 'date', description: 'Last email opened' },
{ name: 'lastEmailClickAt', type: 'date', description: 'Last email link clicked' },
{ name: 'emailOptIn', type: 'boolean', description: 'Email marketing consent' },
{ name: 'smsOptIn', type: 'boolean', description: 'SMS marketing consent' },
{ name: 'handicap', type: 'number', description: 'Golf handicap (lower is better)' },
{ name: 'lifetimeValue', type: 'number', description: 'Total spend in cents' },
{ name: 'tags', type: 'array', description: 'Customer tags' },
{ name: 'memberSince', type: 'date', description: 'Membership start date' },
{ name: 'membershipExpiry', type: 'date', description: 'Membership expiry date' },
],
};
}
}

3.3 Example Queries

Natural LanguageGenerated Rules
"Gold members who haven't played in 30 days"membershipTier = 'Gold' AND lastBookingAt < -30d
"High handicappers who are engaged"handicap >= 18 AND engagementScore >= 70
"Members expiring next month"membershipExpiry BETWEEN now AND +30d
"Inactive premium members"membershipTier IN ['Gold','Platinum'] AND activityCount30d = 0
"Email subscribers who click but don't book"emailOptIn = true AND lastEmailClickAt > -14d AND bookingCount30d = 0

4. Journey Optimization

4.1 Auto-Optimization

ML-powered journey improvements based on performance data.

// crm/services/src/ai/journey-optimization.service.ts

/**
 * One proposed journey change, with the reasoning behind it and the
 * impact predicted for a single metric. Suggestions are ranked by
 * expectedImprovement.uplift (see JourneyOptimizationService).
 */
interface JourneyOptimizationSuggestion {
journeyId: string;
type: 'TIMING' | 'CONTENT' | 'BRANCHING' | 'CHANNEL' | 'AUDIENCE';

suggestion: {
description: string; // Human-readable suggestion
reasoning: string; // Why this is suggested
confidence: number; // 0-1

// Specific change
// Present only when the suggestion targets one concrete step.
stepId?: string;
currentConfig?: unknown;
suggestedConfig?: unknown;
};

// Expected impact
expectedImprovement: {
metric: 'COMPLETION_RATE' | 'ENGAGEMENT' | 'CONVERSION';
currentValue: number;
predictedValue: number;
uplift: number; // Percentage improvement
};
}

@Injectable()
export class JourneyOptimizationService {
  /**
   * Analyze journey and suggest optimizations.
   *
   * Runs five analyzers (timing, drop-off, channel, content, branching)
   * and returns their combined suggestions sorted by expected uplift,
   * highest first.
   *
   * @param journeyId - journey to analyze
   * @returns ranked optimization suggestions (may be empty)
   */
  async analyzeJourney(journeyId: string): Promise<JourneyOptimizationSuggestion[]> {
    const journey = await this.journeyService.findByIdWithSteps(journeyId);
    const stats = await this.getJourneyStats(journeyId);
    // (A previously fetched enrollmentData value was never used by any
    // analyzer and has been removed to avoid a wasted query.)

    const suggestions: JourneyOptimizationSuggestion[] = [];

    // 1. Timing optimization
    suggestions.push(...await this.analyzeTimingOptimizations(journey, stats));

    // 2. Drop-off analysis
    suggestions.push(...await this.analyzeDropOffs(journey, stats));

    // 3. Channel optimization
    suggestions.push(...await this.analyzeChannelPerformance(journey, stats));

    // 4. Content optimization
    suggestions.push(...await this.analyzeContentPerformance(journey, stats));

    // 5. Branching optimization
    suggestions.push(...await this.analyzeBranchingEfficiency(journey, stats));

    // Highest predicted uplift first.
    return suggestions.sort((a, b) =>
      b.expectedImprovement.uplift - a.expectedImprovement.uplift
    );
  }

  /**
   * Analyze WAIT step timing.
   *
   * Flags WAIT steps where more than 20% of entrants drop out during the
   * wait and suggests a shorter duration for each.
   */
  private async analyzeTimingOptimizations(
    journey: JourneyWithSteps,
    stats: JourneyStats
  ): Promise<JourneyOptimizationSuggestion[]> {
    const suggestions: JourneyOptimizationSuggestion[] = [];

    for (const step of journey.steps.filter(s => s.type === 'WAIT')) {
      const stepStats = stats.stepStats[step.id];

      // Guard: a step with no recorded stats (e.g. newly added) or zero
      // entrants would make dropOffRate undefined/NaN below. Skip it.
      if (!stepStats || stepStats.entered === 0) {
        continue;
      }

      const currentWait = this.parseWaitDuration(step.config);

      // Check if customers are dropping off during wait
      const dropOffRate = stepStats.droppedDuringWait / stepStats.entered;

      if (dropOffRate > 0.2) {
        // High drop-off - suggest shorter wait
        suggestions.push({
          journeyId: journey.id,
          type: 'TIMING',
          suggestion: {
            description: `Reduce wait time from ${currentWait} to ${this.suggestShorterWait(currentWait)}`,
            reasoning: `${(dropOffRate * 100).toFixed(0)}% of customers drop off during this wait. Shorter wait may retain more customers.`,
            confidence: 0.7,
            stepId: step.id,
            currentConfig: step.config,
            suggestedConfig: { duration: this.suggestShorterWait(currentWait) },
          },
          expectedImprovement: {
            metric: 'COMPLETION_RATE',
            currentValue: stats.completionRate,
            // Heuristic: assume ~30% of the wait drop-offs are recoverable.
            predictedValue: stats.completionRate * (1 + dropOffRate * 0.3),
            uplift: dropOffRate * 30,
          },
        });
      }
    }

    return suggestions;
  }

  /**
   * Use LLM to generate optimization insights.
   *
   * Returns free-text recommendations intended for display to a
   * marketer, not for programmatic consumption.
   */
  async generateInsights(journeyId: string): Promise<string> {
    const stats = await this.getDetailedStats(journeyId);

    const response = await this.llmClient.generate({
      model: 'claude-3-5-sonnet',
      systemPrompt: `You are a marketing automation expert. Analyze journey performance data and provide actionable insights.`,
      userPrompt: `
Journey: ${stats.journeyName}
Goal: ${stats.journeyGoal}

Performance:
- Enrolled: ${stats.totalEnrolled}
- Completed: ${stats.totalCompleted} (${stats.completionRate}%)
- Active: ${stats.activeCount}
- Avg duration: ${stats.avgDuration}

Step Performance:
${stats.steps.map(s => `- ${s.name}: ${s.enteredCount} entered, ${s.completedCount} completed (${s.completionRate}%)`).join('\n')}

Provide 3-5 specific, actionable recommendations to improve this journey.
`,
      temperature: 0.3,
    });

    return response;
  }
}

4.2 Automated Journey Creation

/**
* Generate a complete journey from a goal description.
*
* Asks the LLM to design a step sequence (SEND/WAIT/CONDITION/UPDATE/
* SPLIT/END) within the given constraints and parses the JSON answer
* into a journey definition.
*
* NOTE(review): tenantId is accepted but never used in this function —
* confirm whether prompts/templates should be tenant-scoped.
*/
async function generateJourneyFromGoal(
tenantId: string,
goal: string,
constraints?: JourneyConstraints
): Promise<GeneratedJourney> {
const response = await llmClient.generate({
model: 'claude-3-5-sonnet',
systemPrompt: `You are a marketing automation architect. Design customer journeys for golf clubs.

Available step types:
- SEND: Send email/SMS/push (config: { channel, templateId })
- WAIT: Delay (config: { duration: "X hours/days" })
- CONDITION: Branch based on customer data (config: { rules, combinator })
- UPDATE: Update customer profile (config: { updates })
- SPLIT: A/B test (config: { variants })
- END: End journey

Output a complete journey definition as JSON.`,
userPrompt: `
GOAL: ${goal}

CONSTRAINTS:
- Max steps: ${constraints?.maxSteps || 10}
- Channels available: ${constraints?.channels?.join(', ') || 'EMAIL, SMS, PUSH'}
- Duration: ${constraints?.maxDuration || '30 days'}

Design an effective journey to achieve this goal.
`,
});

// parseJourneyDefinition must validate the model's JSON; not shown here.
return parseJourneyDefinition(response);
}

// Example usage:
const journey = await generateJourneyFromGoal(
tenantId,
"Re-engage members who haven't booked in 60 days with a series of increasingly urgent messages",
{ maxSteps: 6, channels: ['EMAIL', 'SMS'] }
);

// Generated journey:
// 1. SEND: "We miss you" email
// 2. WAIT: 3 days
// 3. CONDITION: Did they book?
// - YES → END (goal achieved)
// - NO → Continue
// 4. SEND: "Special offer" email with 10% discount
// 5. WAIT: 5 days
// 6. CONDITION: Did they book?
// - YES → END
// - NO → SEND: SMS "Last chance for discount"
// 7. END

5. Personalization Engine

5.1 Dynamic Content Blocks

// crm/services/src/ai/personalization-engine.service.ts

/**
 * A slot in a message whose content varies per customer. Holds the
 * candidate variants, a fallback, and the strategy used to pick one
 * (see PersonalizationEngineService.selectVariant).
 */
interface PersonalizationBlock {
id: string;
type: 'TEXT' | 'IMAGE' | 'OFFER' | 'PRODUCT' | 'CTA';

// Variants for different customer segments
variants: PersonalizationVariant[];

// Default if no variant matches
defaultContent: string;

// Selection strategy
strategy: 'RULE_BASED' | 'ML_PREDICTED' | 'LLM_GENERATED';
}

/**
 * One candidate content variant, with the rule conditions under which
 * it applies and optional ML performance predictions for ranking.
 */
interface PersonalizationVariant {
id: string;
conditions: SegmentCriteria; // When to show this variant
content: string;

// For ML selection
predictedPerformance?: {
clickRate: number;
conversionRate: number;
};
}

@Injectable()
export class PersonalizationEngineService {
/**
* Select best content variant for a customer
*/
async selectVariant(
block: PersonalizationBlock,
customer: CustomerProfile
): Promise<string> {
switch (block.strategy) {
case 'RULE_BASED':
return this.selectByRules(block, customer);

case 'ML_PREDICTED':
return this.selectByMLPrediction(block, customer);

case 'LLM_GENERATED':
return this.generatePersonalized(block, customer);
}
}

/**
* ML-based variant selection using contextual bandit
*/
private async selectByMLPrediction(
block: PersonalizationBlock,
customer: CustomerProfile
): Promise<string> {
const features = await this.extractPersonalizationFeatures(customer);

// Predict click probability for each variant
const predictions = await Promise.all(
block.variants.map(async (variant) => ({
variant,
score: await this.predictVariantScore(variant.id, features),
}))
);

// Epsilon-greedy: 90% best, 10% explore
if (Math.random() < 0.1) {
const randomIdx = Math.floor(Math.random() * predictions.length);
return predictions[randomIdx].variant.content;
}

const best = predictions.reduce((a, b) => a.score > b.score ? a : b);
return best.variant.content;
}

/**
* LLM-generated personalized content
*/
private async generatePersonalized(
block: PersonalizationBlock,
customer: CustomerProfile
): Promise<string> {
const response = await this.llmClient.generate({
model: 'claude-3-haiku',
systemPrompt: `Generate personalized marketing content. Keep the same structure but adapt tone and specifics to the customer.`,
userPrompt: `
BASE CONTENT: ${block.defaultContent}

CUSTOMER CONTEXT:
- Name: ${customer.firstName}
- Membership: ${customer.membershipTier}
- Handicap: ${customer.handicap}
- Engagement: ${customer.engagementScore > 70 ? 'High' : customer.engagementScore > 40 ? 'Medium' : 'Low'}
- Last booking: ${customer.lastBookingAt}
- Interests: ${customer.tags?.join(', ')}

Personalize the content for this customer. Keep it concise.
`,
temperature: 0.7,
maxTokens: 200,
});

return response;
}
}

5.2 Recommendation Engine

/**
 * Request for personalized item recommendations, with optional session
 * context (current page, recent views, cart) to inform ranking.
 */
interface RecommendationRequest {
customerId: string;
type: 'COMPETITION' | 'TEE_TIME' | 'PRODUCT' | 'CONTENT';
count: number;
context?: {
currentPage?: string;
recentViews?: string[];
cartItems?: string[];
};
}

/**
 * A single ranked recommendation, including a short human-readable
 * reason and optional display fields for rendering.
 */
interface Recommendation {
itemId: string;
itemType: string;
title: string;
reason: string; // "Popular with Gold members"
score: number;

// For display
imageUrl?: string;
price?: number;
ctaText?: string;
ctaUrl?: string;
}

@Injectable()
export class RecommendationService {
  /**
   * Get personalized recommendations.
   *
   * Combines collaborative-filtering and content-based candidates,
   * merges/ranks them, attaches LLM-generated explanations, and returns
   * the top `request.count` items.
   */
  async getRecommendations(
    request: RecommendationRequest
  ): Promise<Recommendation[]> {
    const customer = await this.getCustomerProfile(request.customerId);

    // The two recommenders are independent — run them concurrently
    // instead of awaiting them one after the other.
    const [collaborativeRecs, contentRecs] = await Promise.all([
      // Collaborative filtering: Similar customers liked...
      this.getCollaborativeRecs(customer, request.type),
      // Content-based: Based on customer's history
      this.getContentBasedRecs(customer, request.type),
    ]);

    // Merge and rank
    const merged = this.mergeRecommendations(collaborativeRecs, contentRecs);

    // Add explanations
    const withReasons = await this.addExplanations(merged, customer);

    return withReasons.slice(0, request.count);
  }

  /**
   * Generate explanation for recommendation.
   *
   * Uses the LLM to produce short human-friendly reasons. Model output
   * is untrusted text, so it is parsed defensively: on malformed JSON or
   * wrong-shaped entries, items simply keep their existing reason.
   */
  private async addExplanations(
    recs: Recommendation[],
    customer: CustomerProfile
  ): Promise<Recommendation[]> {
    // Use LLM to generate human-friendly reasons
    const response = await this.llmClient.generate({
      model: 'claude-3-haiku',
      systemPrompt: `Generate short (5-10 word) explanations for recommendations.`,
      userPrompt: `
Customer: ${customer.membershipTier} member, handicap ${customer.handicap}, ${customer.engagementScore > 70 ? 'very active' : 'occasional'} player

Recommendations to explain:
${recs.map(r => `- ${r.title} (score: ${r.score})`).join('\n')}

Return JSON: [{ "itemId": "...", "reason": "..." }]
`,
    });

    // Previously: JSON.parse(response) unguarded — threw on any
    // non-JSON model output and typed the result as `any`.
    let reasons: Array<{ itemId: string; reason: string }> = [];
    try {
      const parsed: unknown = JSON.parse(response);
      if (Array.isArray(parsed)) {
        reasons = parsed.filter(
          (x): x is { itemId: string; reason: string } =>
            typeof x === 'object' &&
            x !== null &&
            typeof (x as Record<string, unknown>).itemId === 'string' &&
            typeof (x as Record<string, unknown>).reason === 'string'
        );
      }
    } catch {
      // Malformed JSON — fall through with no generated reasons.
    }

    return recs.map(r => ({
      ...r,
      reason: reasons.find(x => x.itemId === r.itemId)?.reason || r.reason,
    }));
  }
}

6. Churn Intervention AI

6.1 Automated Intervention Recommendations

// crm/services/src/ai/churn-intervention.service.ts

/**
 * Full intervention plan for one at-risk customer: the risk assessment,
 * ranked actions, and predicted financial outcomes with and without
 * intervening (see ChurnInterventionService.generateIntervention).
 */
interface ChurnIntervention {
customerId: string;

// Risk assessment
churnProbability: number;
riskLevel: 'LOW' | 'MEDIUM' | 'HIGH' | 'CRITICAL';
riskFactors: RiskFactor[];

// Recommended actions (ranked)
recommendedActions: InterventionAction[];

// Predicted outcomes
doNothingOutcome: {
churnProbability: number;
expectedLTVLoss: number;
};

// Computed for the top-ranked action.
withInterventionOutcome: {
churnProbability: number;
expectedLTVRetained: number;
costOfIntervention: number;
roi: number;
};
}

/**
 * A single recommended retention action. suggestedOffer is only
 * meaningful for offer-bearing action types (e.g. OFFER).
 */
interface InterventionAction {
type: 'EMAIL' | 'SMS' | 'CALL' | 'OFFER' | 'UPGRADE' | 'DOWNGRADE' | 'PAUSE';
priority: number;

// Action details
description: string;
suggestedContent?: string;
suggestedOffer?: {
type: 'DISCOUNT' | 'FREE_ROUND' | 'UPGRADE' | 'EXTENSION';
value: number;
validDays: number;
};

// Expected impact
expectedChurnReduction: number; // Percentage points
confidence: number;

// Timing
suggestedTiming: 'IMMEDIATE' | 'NEXT_DAY' | 'NEXT_WEEK';
optimalSendTime?: Date;
}

@Injectable()
export class ChurnInterventionService {
  /**
   * Get intervention recommendations for at-risk customers.
   *
   * Fetches ACTIVE customers at or above the risk threshold, highest
   * risk first, and builds an intervention plan for each.
   *
   * @param options.minRiskScore - inclusive threshold; defaults to 60.
   *   Uses `??` so an explicit 0 ("everyone") is honored — the previous
   *   `||` silently replaced 0 with 60.
   * @param options.limit - max customers; defaults to 100 (same `??` fix).
   */
  async getInterventions(
    tenantId: string,
    options?: { minRiskScore?: number; limit?: number }
  ): Promise<ChurnIntervention[]> {
    // Get at-risk customers
    const atRiskCustomers = await this.prisma.customerProfile.findMany({
      where: {
        tenantId,
        status: 'ACTIVE',
        churnRiskScore: { gte: options?.minRiskScore ?? 60 },
      },
      orderBy: { churnRiskScore: 'desc' },
      take: options?.limit ?? 100,
    });

    // Generate interventions for each
    return Promise.all(
      atRiskCustomers.map(c => this.generateIntervention(c))
    );
  }

  /**
   * Generate personalized intervention for a customer.
   *
   * Combines ML risk factors and history with an LLM-drafted strategy,
   * then re-ranks the suggested actions by predicted effectiveness.
   */
  async generateIntervention(
    customer: CustomerProfile
  ): Promise<ChurnIntervention> {
    const riskFactors = await this.mlScoring.getChurnRiskFactors(customer.id);
    const customerHistory = await this.getCustomerHistory(customer.id);

    // Use LLM to generate contextual intervention
    const llmSuggestion = await this.generateLLMIntervention(
      customer,
      riskFactors,
      customerHistory
    );

    // Combine with ML-predicted effectiveness
    const actions = await this.rankInterventions(
      customer,
      llmSuggestion.suggestedActions,
      riskFactors
    );

    return {
      customerId: customer.id,
      churnProbability: customer.churnRiskScore / 100,
      riskLevel: this.getRiskLevel(customer.churnRiskScore),
      riskFactors,
      recommendedActions: actions,
      doNothingOutcome: this.predictDoNothingOutcome(customer),
      // NOTE(review): assumes rankInterventions always yields at least
      // one action; actions[0] is undefined otherwise — confirm.
      withInterventionOutcome: this.predictInterventionOutcome(customer, actions[0]),
    };
  }

  /**
   * LLM-powered intervention strategy.
   *
   * Grounds the model with the customer profile, ML risk factors, recent
   * history and peer successes, and asks for 3 ranked strategies.
   */
  private async generateLLMIntervention(
    customer: CustomerProfile,
    riskFactors: RiskFactor[],
    history: CustomerHistory
  ): Promise<LLMInterventionSuggestion> {
    const response = await this.llmClient.generate({
      model: 'claude-3-5-sonnet',
      systemPrompt: `You are a customer retention expert for golf clubs. Analyze at-risk customers and suggest personalized retention strategies.

Consider:
1. The specific risk factors driving churn
2. Customer's history and value
3. What interventions have worked before
4. Cost-effectiveness of different approaches

Output specific, actionable interventions with expected impact.`,
      userPrompt: `
CUSTOMER PROFILE:
- Member since: ${customer.memberSince}
- Tier: ${customer.membershipTier}
- Lifetime value: $${customer.lifetimeValue / 100}
- Engagement score: ${customer.engagementScore}/100
- Churn risk: ${customer.churnRiskScore}/100

RISK FACTORS:
${riskFactors.map(f => `- ${f.humanReadable} (impact: ${f.contribution})`).join('\n')}

RECENT HISTORY:
- Last booking: ${history.lastBooking || 'Never'}
- Last email open: ${history.lastEmailOpen || 'Never'}
- Recent complaints: ${history.recentComplaints || 'None'}
- Previous offers: ${history.previousOffers?.join(', ') || 'None'}

WHAT HAS WORKED FOR SIMILAR CUSTOMERS:
${history.similarCustomerSuccesses?.join('\n') || 'No data'}

Generate 3 ranked intervention strategies with specific messaging.
`,
    });

    return this.parseLLMIntervention(response);
  }

  /**
   * Execute approved intervention.
   *
   * Logs the approval, dispatches the channel-specific action, then
   * registers the intervention for effectiveness tracking.
   *
   * @throws Error when an OFFER action has no suggestedOffer payload.
   */
  async executeIntervention(
    customerId: string,
    action: InterventionAction,
    approvedBy: string
  ): Promise<void> {
    // Log intervention
    await this.logIntervention(customerId, action, approvedBy);

    switch (action.type) {
      case 'EMAIL':
        await this.sendInterventionEmail(customerId, action);
        break;
      case 'SMS':
        await this.sendInterventionSMS(customerId, action);
        break;
      case 'OFFER':
        // suggestedOffer is optional on InterventionAction; fail loudly
        // instead of forwarding undefined to createOffer.
        if (!action.suggestedOffer) {
          throw new Error(`OFFER intervention for customer ${customerId} has no suggestedOffer`);
        }
        await this.createOffer(customerId, action.suggestedOffer);
        break;
      case 'CALL':
        await this.scheduleCall(customerId, action);
        break;
      default:
        // UPGRADE / DOWNGRADE / PAUSE have no executor yet: they are
        // logged and tracked only (matches the previous behavior).
        break;
    }

    // Track for effectiveness measurement
    await this.trackInterventionStart(customerId, action);
  }
}

6.2 Intervention Effectiveness Tracking

/**
 * Before/after record for one executed intervention, used to measure
 * whether it worked and to feed the effectiveness model. "After" fields
 * are optional because they are filled in ~30 days later.
 */
interface InterventionEffectiveness {
interventionId: string;
customerId: string;
actionType: string;

// Before intervention
churnRiskBefore: number;
engagementBefore: number;

// After intervention (measured after 30 days)
churnRiskAfter?: number;
engagementAfter?: number;
didChurn: boolean;

// Outcome
// Effective = customer retained AND risk score decreased.
wasEffective: boolean;
ltRetained?: number;
costOfIntervention: number;
roi?: number;
}

// Track and learn from interventions

/**
 * Batch job: fill in "after" metrics for interventions whose measurement
 * window has elapsed, persist the outcome, then retrain the model.
 *
 * Measurements are independent per intervention, so they run
 * concurrently (previously one sequential customer fetch at a time).
 */
async function measureInterventionEffectiveness(): Promise<void> {
  const pendingMeasurements = await getPendingMeasurements();

  await Promise.all(
    pendingMeasurements.map(async (intervention) => {
      const customer = await getCustomer(intervention.customerId);

      intervention.churnRiskAfter = customer.churnRiskScore;
      intervention.engagementAfter = customer.engagementScore;
      // NOTE(review): any non-ACTIVE status (including MERGED/DELETED)
      // counts as churn here — confirm that is intended.
      intervention.didChurn = customer.status !== 'ACTIVE';
      intervention.wasEffective =
        !intervention.didChurn &&
        intervention.churnRiskAfter < intervention.churnRiskBefore;

      await saveEffectiveness(intervention);
    })
  );

  // Retrain intervention effectiveness model
  await retrainInterventionModel();
}

7. AI Cost Considerations

7.1 LLM Costs

| Model             | Use Case           | Est. Cost per 1K Calls |
|-------------------|--------------------|------------------------|
| Claude 3 Haiku    | Content generation | $0.25                  |
| Claude 3.5 Sonnet | Complex reasoning  | $3.00                  |
| GPT-4 Turbo       | Alternative        | $10.00                 |

7.2 Cost Optimization

// Per-tenant knobs for controlling LLM spend (pricing in section 7.1).
interface AICostConfig {
  // Model selection
  contentGeneration: 'HAIKU' | 'SONNET'; // Haiku is 12x cheaper ($0.25 vs $3.00 per 1K calls)
  segmentParsing: 'HAIKU' | 'SONNET'; // Sonnet for accuracy
  interventions: 'SONNET'; // Intervention strategy needs stronger reasoning

  // Caching
  cacheContentVariants: boolean; // Cache generated content for identical requests
  cacheTTLHours: number; // How long cached variants stay valid

  // Rate limiting
  maxLLMCallsPerHour: number; // Prevent runaway call volume
  maxLLMCostPerDay: number; // Hard daily cost cap

  // Fallbacks
  fallbackToRules: boolean; // Use rule-based behavior if LLM unavailable
}

// Monthly cost estimate
/**
 * Back-of-envelope monthly AI cost per tenant, in USD.
 * Each entry is (calls per month × per-1K-call price from section 7.1 / 1000).
 */
const estimatedMonthlyCost = {
  contentGeneration: (1000 * 0.25) / 1000, // 1K campaigns on Haiku  = $0.25
  segmentQueries: (500 * 3.0) / 1000, //      500 queries on Sonnet  = $1.50
  interventions: (200 * 3.0) / 1000, //       200 at-risk on Sonnet  = $0.60
  journeyOptimization: (50 * 3.0) / 1000, //  50 journeys on Sonnet  = $0.15
};

// Derived total (~$2.50/tenant/month) so the headline figure can never
// drift out of sync with the line items above.
const estimatedMonthlyTotal = Object.values(estimatedMonthlyCost).reduce(
  (sum, cost) => sum + cost,
  0
);

8. Data Flow Architecture

┌─────────────────────────────────────────────────────────────────────────┐
│ AI MARKETING AUTOMATION FLOW │
├─────────────────────────────────────────────────────────────────────────┤
│ │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ Customer │ │ ML Models │ │ LLM │ │
│ │ Data │────▶│ (Scoring) │────▶│ (Content) │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ │ │ │ │
│ ▼ ▼ ▼ │
│ ┌─────────────────────────────────────────────────────────────┐ │
│ │ PERSONALIZATION ENGINE │ │
│ │ - Segment matching │ │
│ │ - Content variant selection │ │
│ │ - Send time optimization │ │
│ │ - Channel preference │ │
│ └─────────────────────────────────────────────────────────────┘ │
│ │ │
│ ┌────────────────────┼────────────────────┐ │
│ ▼ ▼ ▼ │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ Journey │ │ Campaign │ │ Churn │ │
│ │ Automation │ │ Execution │ │ Intervention│ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ │ │ │ │
│ └────────────────────┼────────────────────┘ │
│ ▼ │
│ ┌─────────────────┐ │
│ │ Messaging │ │
│ │ Service │ │
│ │ (Delivery) │ │
│ └─────────────────┘ │
│ │
└─────────────────────────────────────────────────────────────────────────┘

9. Privacy & Data Handling

9.1 Data Minimization for LLM Calls

// NEVER send PII to external LLMs - anonymize first

// Declarative PII policy for outbound LLM calls.
// NOTE(review): the field "values" below are tuple types of string
// literals — they document the policy at the type level but provide no
// runtime enforcement. A matching runtime const is needed wherever the
// policy is actually applied (see LLMDataSanitizer).
interface LLMDataPolicy {
  // Fields NEVER sent to LLM
  excludedFields: [
    'email',
    'phoneNumber',
    'dateOfBirth',
    'address',
    'idNumber', // SA (South African) national ID number
    'passportNumber',
  ];

  // Fields sent anonymized/tokenized
  tokenizedFields: [
    'firstName', // Sent as {{firstName}} placeholder, re-substituted locally
    'lastName',
    'memberNumber',
  ];

  // Fields sent as coarse aggregates only, never raw values
  aggregatedFields: [
    'lifetimeValue', // Sent as range: "high value"
    'age', // Sent as range: "35-44"
  ];
}

@Injectable()
export class LLMDataSanitizer {
  /**
   * Strip PII from a customer profile before any LLM call.
   *
   * Names become template tokens (re-substituted locally after the LLM
   * responds), numeric values that could identify the customer are
   * reduced to coarse categories, and only whitelisted fields pass
   * through unchanged (see LLMDataPolicy).
   */
  sanitizeForLLM(customer: CustomerProfile): SanitizedContext {
    return {
      // Tokenized (replaced after LLM response)
      firstName: '{{firstName}}',
      lastName: '{{lastName}}',

      // Categorized (no raw values)
      valueSegment: this.categorizeValue(customer.lifetimeValue),
      ageGroup: this.categorizeAge(customer.dateOfBirth),
      engagementLevel: this.categorizeEngagement(customer.engagementScore),

      // Safe to include
      membershipTier: customer.membershipTier,
      preferredChannel: customer.preferredChannel,
      interests: customer.tags?.filter(t => t.category === 'INTEREST'),
    };
  }

  // Bucket lifetime value into a coarse segment. Thresholds are presumably
  // in cents (lifetimeValue is divided by 100 elsewhere in this spec) —
  // TODO confirm units against the CRM data model.
  private categorizeValue(ltv: number): string {
    if (ltv > 100000) return 'high-value';
    if (ltv > 30000) return 'medium-value';
    return 'standard';
  }

  // Bucket date of birth into an age band; the raw DOB (excluded PII per
  // LLMDataPolicy) never leaves this method. Missing in the original
  // although sanitizeForLLM called it.
  private categorizeAge(dateOfBirth: Date | string | undefined): string {
    if (!dateOfBirth) return 'unknown';
    const dob = new Date(dateOfBirth);
    const ageYears = (Date.now() - dob.getTime()) / (365.25 * 24 * 3600 * 1000);
    if (ageYears < 25) return '18-24';
    if (ageYears < 35) return '25-34';
    if (ageYears < 45) return '35-44';
    if (ageYears < 55) return '45-54';
    if (ageYears < 65) return '55-64';
    return '65+';
  }

  // Map a 0-100 engagement score onto the HIGH/MEDIUM/LOW scale used by
  // AudienceContext. Missing in the original although sanitizeForLLM
  // called it; thresholds are a starting point — TODO tune with data.
  private categorizeEngagement(score: number): string {
    if (score >= 70) return 'HIGH';
    if (score >= 40) return 'MEDIUM';
    return 'LOW';
  }
}

9.2 POPIA/GDPR Compliance

// POPIA/GDPR controls gating AI processing of customer data.
interface AIConsentConfig {
  // Explicit consent required for AI personalization
  requireAIConsent: boolean;
  consentFieldName: string; // Profile field holding consent, e.g. 'aiMarketingConsent'

  // Right to explanation (GDPR Art. 22; see section 11.2)
  provideExplanations: boolean; // "Why did I receive this?"
  explanationEndpoint: string; // GET /api/v1/ai/explain/:messageId

  // Opt-out
  honorDoNotProfile: boolean; // Skip AI entirely for opted-out customers
  doNotProfileField: string; // Profile field holding the opt-out flag, e.g. 'doNotProfile'
}

// Check consent before AI processing
async function canUseAI(customerId: string): Promise<boolean> {
const profile = await this.getProfile(customerId);

// Check explicit AI consent
if (!profile.aiMarketingConsent) return false;

// Check do-not-profile flag
if (profile.doNotProfile) return false;

// Check general marketing consent
if (!profile.emailOptIn && !profile.smsOptIn) return false;

return true;
}

10. Security & Prompt Safety

10.1 Prompt Injection Prevention

// Prevent prompt injection attacks

// Controls for hardening prompts against injection and abuse.
interface PromptSecurityConfig {
  // Input sanitization (applied before user text enters a prompt)
  maxInputLength: number; // Truncate long inputs
  sanitizeUserInput: boolean; // Remove potential injection patterns
  allowedCharacters: RegExp; // Whitelist safe characters

  // Output validation (applied to LLM responses before use)
  validateOutput: boolean; // Check for malicious output
  maxOutputLength: number; // Prevent token exhaustion
  requiredOutputFormat: 'JSON' | 'TEXT';
}

@Injectable()
export class PromptSecurityService {
  // Known prompt-injection phrasings. All patterns carry the `g` flag so
  // String.replace removes EVERY occurrence — the original flags matched
  // only the first occurrence per pattern, letting repeats through.
  private readonly INJECTION_PATTERNS = [
    /ignore.*previous.*instructions/gi,
    /disregard.*above/gi,
    /system.*prompt/gi,
    /```.*system/gi,
    /<\|.*\|>/g, // Model special tokens
  ];

  /**
   * Sanitize user-provided input before including it in a prompt.
   *
   * Removes known injection phrasings, escapes characters that could
   * break out of a quoted prompt section, and truncates to 500 chars.
   * NOTE(review): should honor PromptSecurityConfig.maxInputLength —
   * confirm where the config is injected.
   */
  sanitizeInput(input: string): string {
    let sanitized = input;

    for (const pattern of this.INJECTION_PATTERNS) {
      sanitized = sanitized.replace(pattern, '[REMOVED]');
    }

    // Escape backslashes FIRST, then quotes, so quote-escapes survive.
    sanitized = sanitized
      .replace(/\\/g, '\\\\')
      .replace(/"/g, '\\"');

    return sanitized.slice(0, 500);
  }

  /**
   * Validate LLM output before using it downstream.
   *
   * Rejects output containing injection-style phrasings and, when JSON
   * is expected, output that does not parse.
   */
  validateOutput(output: string, expectedFormat: 'JSON' | 'TEXT'): ValidationResult {
    // `g`-flagged regexes are stateful under .test() (lastIndex advances);
    // reset it so repeated validations never silently skip matches.
    const hasSuspicious = this.INJECTION_PATTERNS.some(pattern => {
      pattern.lastIndex = 0;
      return pattern.test(output);
    });

    if (hasSuspicious) {
      this.logger.warn('Suspicious LLM output detected', { output: output.slice(0, 100) });
      return { valid: false, reason: 'SUSPICIOUS_OUTPUT' };
    }

    // Validate JSON if expected
    if (expectedFormat === 'JSON') {
      try {
        JSON.parse(output);
      } catch {
        return { valid: false, reason: 'INVALID_JSON' };
      }
    }

    return { valid: true };
  }
}

10.2 Output Sanitization

// Sanitize generated content before use

@Injectable()
export class ContentSanitizer {
  /**
   * Sanitize LLM-generated content before it enters a campaign:
   * strip HTML/script injection, neutralize unknown personalization
   * tokens, and run a moderation check.
   *
   * @param type - Content type; reserved for type-specific rules (unused here).
   * @throws ContentModerationError when moderation fails.
   */
  sanitizeGeneratedContent(content: string, type: ContentType): string {
    // Remove any HTML/script injection
    let sanitized = this.stripDangerousTags(content);

    // Validate personalization tokens
    sanitized = this.validateTokens(sanitized);

    // Check for inappropriate content
    if (this.containsInappropriate(sanitized)) {
      throw new ContentModerationError('Generated content failed moderation');
    }

    return sanitized;
  }

  // Strip script/iframe blocks, inline event handlers, and javascript:
  // URLs. The `s` (dotAll) flag lets `.` cross newlines so multi-line
  // <script> blocks are removed too — the original missed them. Also
  // handles single-quoted event-handler attributes, not just double.
  private stripDangerousTags(html: string): string {
    return html
      .replace(/<script[^>]*>.*?<\/script>/gis, '')
      .replace(/<iframe[^>]*>.*?<\/iframe>/gis, '')
      .replace(/on\w+\s*=\s*"[^"]*"/gi, '')
      .replace(/on\w+\s*=\s*'[^']*'/gi, '')
      .replace(/javascript:/gi, '');
  }

  // Replace any {{token}} that is not on the whitelist so hallucinated
  // tokens never reach the template renderer.
  private validateTokens(content: string): string {
    const ALLOWED_TOKENS = ['firstName', 'lastName', 'membershipTier', 'clubName'];
    const tokenPattern = /\{\{(\w+)\}\}/g;

    return content.replace(tokenPattern, (match, token) => {
      if (ALLOWED_TOKENS.includes(token)) return match;
      return '[INVALID_TOKEN]';
    });
  }
}

11. Audit & Explainability

11.1 AI Decision Logging

// Log all AI decisions for audit and debugging

// Append-only audit log of every AI decision (LLM call or ML prediction).
// Feeds the explainability endpoint (section 11.2), cost tracking (7.x)
// and debugging.
model AIDecisionLog {
  id            String         @id @default(cuid())
  tenantId      String
  customerId    String? // Null when the decision is not tied to one customer (e.g. segment parsing)
  decisionType  AIDecisionType
  modelUsed     String // 'claude-3-haiku', 'churn-classifier-v1'
  inputSummary  Json // Sanitized input (no PII — see LLMDataSanitizer)
  outputSummary Json // Generated output
  confidence    Float? // Model confidence
  latencyMs     Int // Response time
  cost          Float // Estimated LLM cost
  wasUsed       Boolean @default(true) // Was the output actually used?
  createdAt     DateTime @default(now())

  @@index([tenantId, decisionType, createdAt]) // Dashboard/metrics queries
  @@index([customerId, createdAt]) // Per-customer explanation lookups
}

// One value per AI capability in this spec (sections 1-6).
enum AIDecisionType {
  CONTENT_GENERATION
  SEND_TIME_PREDICTION
  SEGMENT_PARSING
  JOURNEY_OPTIMIZATION
  CHURN_INTERVENTION
  PERSONALIZATION
}

11.2 Explainability API

// Provide explanations for AI decisions (GDPR Article 22)

@Controller('api/v1/ai')
export class AIExplainabilityController {
  /**
   * Right-to-explanation endpoint (GDPR Article 22): explains why a
   * message was sent, what the AI decided about content and timing, and
   * how the customer can opt out.
   */
  @Get('explain/:messageId')
  @ApiOperation({ summary: 'Explain why a message was sent' })
  async explainMessage(
    @Param('messageId') messageId: string
  ): Promise<MessageExplanation> {
    const log = await this.getDecisionLog(messageId);

    return {
      messageId,
      explanation: {
        whyYouReceivedThis: this.generateWhyExplanation(log),
        contentDecisions: {
          subjectLine: log.contentDecisions?.subjectLine,
          approach: log.contentDecisions?.approach,
        },
        timingDecisions: {
          sendTime: log.timingDecisions?.selectedTime,
          reason: log.timingDecisions?.reason,
        },
        segmentMembership: {
          segments: log.segments,
          rules: log.segmentRules,
        },
      },
      howToOptOut: {
        emailPreferences: '/preferences/email',
        aiPersonalization: '/preferences/ai',
      },
    };
  }

  // Build a human-readable explanation from the decision log. Falls back
  // to a generic sentence when no specific reason matches — the original
  // returned a bare "." in that case.
  private generateWhyExplanation(log: AIDecisionLog): string {
    const reasons: string[] = [];

    if (log.segments?.includes('active-members')) {
      reasons.push('You are an active member of the club');
    }
    if (log.timingDecisions?.reason === 'ENGAGEMENT_HISTORY') {
      reasons.push('We sent this at a time you typically engage with emails');
    }
    if (log.contentDecisions?.personalized) {
      reasons.push('The content was personalized based on your interests');
    }

    if (reasons.length === 0) {
      return 'You received this as part of a campaign you are subscribed to.';
    }
    return reasons.join('. ') + '.';
  }
}

11.3 A/B Test Attribution

// Track which AI variant was shown and its performance

// Per-message record linking the AI variant that was shown to its
// engagement outcome — the input to closed-loop learning (recordOutcome).
// NOTE(review): recordOutcome() reads `attribution.decisionLogId`, which
// is not declared here — this interface likely needs that field added.
interface AIVariantAttribution {
  messageId: string;
  customerId: string;
  campaignId: string;

  // Content variant
  contentVariantId: string;
  contentApproach: string; // 'urgency', 'benefit', 'social-proof'
  contentGeneratedBy: 'AI' | 'HUMAN';

  // Timing variant
  sendTimeVariant: string; // 'ML_OPTIMIZED' | 'SCHEDULED'
  actualSendTime: Date;

  // Outcome (updated async as engagement events arrive)
  opened: boolean;
  clicked: boolean;
  converted: boolean;
  unsubscribed: boolean;
}

// Closed-loop learning
async function recordOutcome(attribution: AIVariantAttribution): Promise<void> {
// Update decision log with outcome
await this.prisma.aIDecisionLog.update({
where: { id: attribution.decisionLogId },
data: {
outcome: {
opened: attribution.opened,
clicked: attribution.clicked,
converted: attribution.converted,
},
},
});

// Feed back to ML models for learning
if (attribution.contentGeneratedBy === 'AI') {
await this.contentLearningService.recordOutcome(attribution);
}
if (attribution.sendTimeVariant === 'ML_OPTIMIZED') {
await this.stoLearningService.recordOutcome(attribution);
}
}

12. Error Handling & Fallbacks

12.1 Graceful Degradation

// Degradation policy when AI services fail (used by AIFallbackService).
interface AIFallbackConfig {
  // LLM failures
  onLLMTimeout: 'USE_TEMPLATE' | 'RETRY' | 'SKIP';
  onLLMError: 'USE_TEMPLATE' | 'SKIP' | 'ALERT';
  maxRetries: number;
  retryDelayMs: number;

  // Content fallbacks (static templates used instead of AI output)
  fallbackSubjectTemplate: string;
  fallbackBodyTemplate: string;

  // Send-time-optimization fallbacks
  onSTOFailure: 'USE_DEFAULT_TIME' | 'USE_CAMPAIGN_TIME';
  defaultSendHour: number; // e.g. 10 for 10 AM

  // Natural-language segment parsing fallbacks
  onParseFailure: 'SHOW_ERROR' | 'SUGGEST_MANUAL';
}

@Injectable()
export class AIFallbackService {
  /**
   * Generate campaign content, degrading to the tenant's static template
   * when AI generation fails (see AIFallbackConfig.onLLMError).
   */
  async generateContentWithFallback(
    request: ContentGenerationRequest
  ): Promise<ContentResult> {
    try {
      // Try AI generation
      const content = await this.contentService.generateVariants(request);
      return { source: 'AI', content };
    } catch (error) {
      this.logger.warn('AI content generation failed, using fallback', { error });

      // `error` is `unknown` under strict TS — narrow before reading .message
      // (the original assumed Error and could itself throw here).
      const reason = error instanceof Error ? error.message : String(error);

      // Emit event for monitoring (feeds the fallbackRate metric)
      this.events.emit(CrmEvent.AIFallbackUsed, {
        type: 'CONTENT_GENERATION',
        reason,
      });

      // Return template-based content
      return {
        source: 'TEMPLATE',
        content: await this.templateService.getDefault(request.type),
      };
    }
  }

  /**
   * Predict the customer's optimal send time, degrading to the campaign's
   * scheduled time when the ML service is unavailable.
   */
  async getOptimalSendTimeWithFallback(
    customerId: string,
    campaignTime: Date
  ): Promise<SendTimeResult> {
    try {
      const optimal = await this.stoService.getOptimalTime(customerId);
      return { source: 'ML', time: optimal };
    } catch (error) {
      this.logger.warn('STO prediction failed, using campaign time', { error });

      return {
        source: 'FALLBACK',
        time: campaignTime,
        reason: 'ML prediction unavailable',
      };
    }
  }
}

12.2 Circuit Breaker

// Prevent cascade failures when AI services are degraded

// Tuning for AICircuitBreaker (CLOSED → OPEN → HALF_OPEN cycle).
interface CircuitBreakerConfig {
  failureThreshold: number; // Failure count that opens the circuit
  successThreshold: number; // Half-open successes needed to close it again
  timeoutMs: number; // How long the circuit stays open before a half-open probe
}

@Injectable()
export class AICircuitBreaker {
  private failures = 0;
  private successes = 0;
  private state: 'CLOSED' | 'OPEN' | 'HALF_OPEN' = 'CLOSED';
  private lastFailure?: Date;

  // The original referenced `this.config` / `this.logger` without ever
  // declaring them. Defaults keep zero-arg construction working; DI or
  // callers may supply tenant-specific thresholds. Structural types are
  // used so no new named dependencies are introduced.
  constructor(
    private readonly config: {
      failureThreshold: number;
      successThreshold: number;
      timeoutMs: number;
    } = { failureThreshold: 5, successThreshold: 2, timeoutMs: 30_000 },
    private readonly logger: { error(message: string, meta?: unknown): void } = console
  ) {}

  /**
   * Run `operation`, routing to `fallback` while the circuit is open or
   * when the operation fails. A half-open probe is attempted once the
   * open-state timeout has elapsed.
   */
  async execute<T>(
    operation: () => Promise<T>,
    fallback: () => Promise<T>
  ): Promise<T> {
    // If circuit is open, use fallback (unless it's time to probe)
    if (this.state === 'OPEN') {
      if (this.shouldAttemptReset()) {
        this.state = 'HALF_OPEN';
      } else {
        return fallback();
      }
    }

    try {
      const result = await operation();
      this.onSuccess();
      return result;
    } catch (error) {
      this.onFailure();
      return fallback();
    }
  }

  // True once the probe interval has elapsed since the last failure.
  // (Referenced but never implemented in the original.)
  private shouldAttemptReset(): boolean {
    if (!this.lastFailure) return true;
    return Date.now() - this.lastFailure.getTime() >= this.config.timeoutMs;
  }

  private onFailure(): void {
    this.failures++;
    this.lastFailure = new Date();

    // A failed half-open probe re-opens immediately; any partial success
    // streak is discarded.
    if (this.state === 'HALF_OPEN' || this.failures >= this.config.failureThreshold) {
      this.state = 'OPEN';
      this.successes = 0;
      this.logger.error('AI circuit breaker opened', {
        failures: this.failures,
      });
    }
  }

  private onSuccess(): void {
    if (this.state === 'HALF_OPEN') {
      this.successes++;
      if (this.successes >= this.config.successThreshold) {
        this.state = 'CLOSED';
        this.failures = 0;
        this.successes = 0;
      }
    } else {
      // Original bug: failures were never reset on success while CLOSED,
      // so sporadic, non-consecutive failures eventually opened the
      // circuit. Clear the streak here.
      this.failures = 0;
    }
  }
}

13. Human-in-the-Loop Workflows

13.1 Content Approval Workflow

// For high-stakes AI content, require human approval before sending

// How much human review AI-generated content needs before sending,
// ordered from least to most oversight.
enum ApprovalRequirement {
  NONE = 'NONE', // Auto-send (low risk)
  SAMPLE_REVIEW = 'SAMPLE_REVIEW', // Review a 5% random sample
  FIRST_USE = 'FIRST_USE', // Review only the first time a variant is used
  ALL = 'ALL', // Every message requires approval
}

// Per-tenant human-in-the-loop policy for AI-generated content.
interface TenantAIApprovalConfig {
  tenantId: string;

  // Approval requirement per content type
  emailSubjectApproval: ApprovalRequirement;
  emailBodyApproval: ApprovalRequirement;
  smsApproval: ApprovalRequirement;
  pushApproval: ApprovalRequirement;

  // Automatic approval after N approved uses of the same content
  autoApproveAfterCount: number; // e.g. 10 approvals → subsequent uses auto-approve

  // Reviewers
  defaultReviewers: string[]; // User IDs
  escalationReviewer?: string; // Receives requests that time out
  approvalTimeoutHours: number; // Pending requests expire after N hours
}

@Injectable()
export class ContentApprovalService {
  /**
   * Submit AI-generated content for human approval.
   *
   * Returns AUTO_APPROVED immediately when the tenant's policy requires
   * no review; otherwise persists a pending request, notifies reviewers,
   * and returns the request ID for tracking.
   */
  async submitForApproval(
    content: GeneratedContent,
    context: ApprovalContext
  ): Promise<ApprovalRequest> {
    const requirement = this.getApprovalRequirement(context);

    if (requirement === ApprovalRequirement.NONE) {
      return { status: 'AUTO_APPROVED', content };
    }

    // Create approval request.
    // NOTE(review): the ContentApprovalRequest model requires contentHash,
    // which is not set here — confirm where it is computed.
    const request = await this.prisma.contentApprovalRequest.create({
      data: {
        tenantId: context.tenantId,
        contentType: content.type,
        generatedContent: content,
        campaignId: context.campaignId,
        status: 'PENDING',
        reviewers: context.reviewers,
        expiresAt: this.getExpirationTime(context),
      },
    });

    // Notify reviewers
    await this.notifyReviewers(request);

    return { status: 'PENDING', requestId: request.id };
  }

  /**
   * Approve or reject pending content.
   *
   * @throws Error when the request does not exist or was already settled
   *         (the original dereferenced findUnique's possibly-null result
   *         and allowed double-review of non-PENDING requests).
   */
  async reviewContent(
    requestId: string,
    decision: ApprovalDecision,
    reviewerId: string
  ): Promise<void> {
    const request = await this.prisma.contentApprovalRequest.findUnique({
      where: { id: requestId },
    });

    if (!request) {
      throw new Error(`Approval request not found: ${requestId}`);
    }
    if (request.status !== 'PENDING') {
      throw new Error(`Approval request ${requestId} is already ${request.status}`);
    }

    await this.prisma.contentApprovalRequest.update({
      where: { id: requestId },
      data: {
        status: decision.approved ? 'APPROVED' : 'REJECTED',
        reviewedBy: reviewerId,
        reviewedAt: new Date(),
        feedback: decision.feedback,
        editedContent: decision.editedContent,
      },
    });

    if (decision.approved) {
      // Track approval for the auto-approve threshold
      await this.trackApproval(request.contentHash);

      // Resume campaign execution (with the edited content, if any)
      await this.resumeCampaign(request.campaignId, decision.editedContent);
    } else {
      // Notify campaign owner of rejection
      await this.notifyRejection(request, decision.feedback);
    }
  }
}

// Prisma model for approval requests
// Pending/settled human-review requests for AI-generated content
// (see ContentApprovalService and the ApprovalRequirement policy).
model ContentApprovalRequest {
  id               String @id @default(cuid())
  tenantId         String
  contentType      String // EMAIL_SUBJECT, EMAIL_BODY, etc.
  generatedContent Json
  campaignId       String?
  status           ApprovalStatus @default(PENDING)
  reviewers        String[] // User IDs invited to review
  reviewedBy       String? // User who made the decision
  reviewedAt       DateTime?
  feedback         String? // Reviewer's comment, surfaced on rejection
  editedContent    Json? // Set when the reviewer edited before approving
  contentHash      String // Dedup key — tracks repeat content for auto-approval
  expiresAt        DateTime // Auto-expire per approvalTimeoutHours
  createdAt        DateTime @default(now())

  @@index([tenantId, status]) // Reviewer inbox queries
  @@index([campaignId])
}

// Lifecycle of a ContentApprovalRequest. EXPIRED is set when the
// approvalTimeoutHours window elapses without a decision.
enum ApprovalStatus {
  PENDING
  APPROVED
  REJECTED
  EXPIRED
}

13.2 Intervention Approval

// High-value interventions require manager approval

// Tenant policy deciding which churn interventions auto-execute and which
// need a manager's sign-off first.
interface InterventionApprovalRule {
  // Require approval when the offer's value exceeds this threshold
  offerValueThreshold: number; // In cents, e.g. 5000 = $50

  // Action types that always require approval
  requireApprovalFor: InterventionActionType[];

  // Low-risk action types that execute without review
  autoExecuteFor: InterventionActionType[];
}

// Example config
const interventionApprovalConfig: InterventionApprovalRule = {
offerValueThreshold: 5000, // Offers > $50 need approval
requireApprovalFor: ['CALL', 'UPGRADE', 'LARGE_DISCOUNT'],
autoExecuteFor: ['EMAIL', 'SMS', 'SMALL_DISCOUNT'],
};

14. AI Quality Metrics & Monitoring

14.1 Quality Dashboard Metrics

// Dashboard payload summarizing AI quality across the four capability
// areas plus overall system health (assembled by AIQualityService).
interface AIQualityMetrics {
  // Content generation quality
  contentMetrics: {
    averageVariantCount: number; // Variants generated per request
    approvalRate: number; // % of content approved by human reviewers
    editRate: number; // % of content edited before approval
    rejectionRate: number; // % rejected outright

    // AI vs. human-written content performance (see compareAIvsHuman)
    aiVsHumanOpenRate: {
      ai: number;
      human: number;
      uplift: number; // Positive = AI outperforms
    };
    aiVsHumanClickRate: {
      ai: number;
      human: number;
      uplift: number;
    };
  };

  // Send-time-optimization quality
  stoMetrics: {
    avgPredictionConfidence: number;
    actualVsPredictedAccuracy: number; // Did customers open at the predicted time?
    openRateImprovement: number; // vs. fixed-time sends
  };

  // Natural-language segment builder quality
  segmentMetrics: {
    parseSuccessRate: number; // % of queries successfully parsed
    avgAssumptionsCount: number; // Assumptions made per query
    userAcceptanceRate: number; // % of parsed segments the user actually created
  };

  // System health (feeds cost alerting and fallback monitoring)
  systemMetrics: {
    avgLatencyMs: number;
    errorRate: number;
    fallbackRate: number; // How often AIFallbackService degraded to templates
    circuitBreakerEvents: number;
    totalLLMCalls: number;
    totalCost: number;
  };
}

@Injectable()
export class AIQualityService {
  /**
   * Assemble the AIQualityMetrics dashboard payload for a tenant over a
   * date range. The four stat groups are independent queries and run in
   * parallel.
   */
  async getQualityMetrics(
    tenantId: string,
    dateRange: DateRange
  ): Promise<AIQualityMetrics> {
    const [
      contentStats,
      stoStats,
      segmentStats,
      systemStats,
    ] = await Promise.all([
      this.getContentStats(tenantId, dateRange),
      this.getSTOStats(tenantId, dateRange),
      this.getSegmentStats(tenantId, dateRange),
      this.getSystemStats(tenantId, dateRange),
    ]);

    return {
      contentMetrics: contentStats,
      stoMetrics: stoStats,
      segmentMetrics: segmentStats,
      systemMetrics: systemStats,
    };
  }

  /**
   * Compare engagement of AI-generated vs human-written content in the
   * given window, with a statistical-significance estimate.
   */
  async compareAIvsHuman(
    tenantId: string,
    dateRange: DateRange
  ): Promise<AIvsHumanComparison> {
    // The original duplicated the query inline and ran the two fetches
    // sequentially; share the query and fetch both cohorts in parallel.
    const [aiMessages, humanMessages] = await Promise.all([
      this.findInteractionsBySource(tenantId, dateRange, 'AI'),
      this.findInteractionsBySource(tenantId, dateRange, 'HUMAN'),
    ]);

    return {
      ai: this.calculateEngagementRates(aiMessages),
      human: this.calculateEngagementRates(humanMessages),
      statisticalSignificance: this.calculateSignificance(aiMessages, humanMessages),
    };
  }

  // Shared query: campaign interactions in range whose content came from
  // the given source, matched via the metadata JSON path 'contentSource'.
  private findInteractionsBySource(
    tenantId: string,
    dateRange: DateRange,
    source: 'AI' | 'HUMAN'
  ) {
    return this.prisma.campaignInteraction.findMany({
      where: {
        tenantId,
        occurredAt: { gte: dateRange.start, lte: dateRange.end },
        metadata: { path: ['contentSource'], equals: source },
      },
    });
  }
}

14.2 Cost Alerting

// Per-tenant cost-alert policy enforced hourly by AICostMonitorService.
interface AICostAlertConfig {
  tenantId: string;

  // Alert thresholds (USD)
  dailyCostWarning: number; // e.g. $5
  dailyCostCritical: number; // e.g. $10
  monthlyCostBudget: number; // e.g. $50 — early warning fires at 80%

  // Actions taken when a threshold is crossed
  onWarning: 'NOTIFY' | 'NOTIFY_AND_THROTTLE'; // Throttle = 50% of normal rate
  onCritical: 'NOTIFY' | 'THROTTLE' | 'DISABLE'; // Throttle = 10% of normal rate

  // Notification routing
  alertRecipients: string[];
  alertChannels: ('EMAIL' | 'SLACK' | 'WEBHOOK')[];
}

@Injectable()
export class AICostMonitorService {
@Cron('0 * * * *') // Every hour
async checkCostAlerts(): Promise<void> {
const tenants = await this.getActiveTenantsWithAI();

for (const tenant of tenants) {
const costs = await this.calculateDailyCosts(tenant.id);
const config = await this.getAlertConfig(tenant.id);

if (costs.totalToday >= config.dailyCostCritical) {
await this.triggerAlert(tenant.id, 'CRITICAL', costs);

if (config.onCritical === 'DISABLE') {
await this.disableAI(tenant.id, 'Cost limit exceeded');
} else if (config.onCritical === 'THROTTLE') {
await this.throttleAI(tenant.id, 0.1); // 10% of normal rate
}
} else if (costs.totalToday >= config.dailyCostWarning) {
await this.triggerAlert(tenant.id, 'WARNING', costs);

if (config.onWarning === 'NOTIFY_AND_THROTTLE') {
await this.throttleAI(tenant.id, 0.5); // 50% of normal rate
}
}

// Monthly budget check
if (costs.totalMonth >= config.monthlyCostBudget * 0.8) {
await this.triggerAlert(tenant.id, 'BUDGET_WARNING', {
used: costs.totalMonth,
budget: config.monthlyCostBudget,
percentUsed: (costs.totalMonth / config.monthlyCostBudget) * 100,
});
}
}
}
}

15. Tenant-Level AI Configuration

15.1 Feature Toggles

// Full per-tenant AI configuration: feature flags, model choice, spend
// limits, content and privacy settings. Presets per tier live in
// AI_TIER_PRESETS; managed via the admin API (section 15.2).
interface TenantAIConfig {
  tenantId: string;

  // Feature flags — one per AI capability in sections 1-6
  features: {
    contentGeneration: boolean;
    sendTimeOptimization: boolean;
    nlSegmentBuilder: boolean;
    journeyOptimization: boolean;
    personalizationEngine: boolean;
    churnIntervention: boolean;
  };

  // Model preferences
  preferredModel: 'CLAUDE_HAIKU' | 'CLAUDE_SONNET' | 'GPT4';
  fallbackModel: 'CLAUDE_HAIKU' | 'TEMPLATE_ONLY'; // Used when the preferred model fails

  // Quality vs cost tradeoff
  qualityTier: 'ECONOMY' | 'STANDARD' | 'PREMIUM';

  // Rate limits (enforced by cost monitoring, section 14.2)
  maxDailyLLMCalls: number;
  maxMonthlyCost: number; // USD

  // Content settings (fed into ContentGenerationRequest)
  brandVoice?: string;
  avoidWords?: string[];
  requireApproval: boolean; // Human-in-the-loop gate (section 13)

  // Privacy settings
  allowLLMPersonalization: boolean; // May customer context be sent (sanitized) to the LLM?
  piiExclusions: string[]; // Extra PII fields to exclude beyond LLMDataPolicy
}

// Preset configurations by tier
const AI_TIER_PRESETS: Record<string, Partial<TenantAIConfig>> = {
ECONOMY: {
preferredModel: 'CLAUDE_HAIKU',
maxDailyLLMCalls: 100,
maxMonthlyCost: 25,
features: {
contentGeneration: true,
sendTimeOptimization: false,
nlSegmentBuilder: true,
journeyOptimization: false,
personalizationEngine: false,
churnIntervention: false,
},
},
STANDARD: {
preferredModel: 'CLAUDE_HAIKU',
maxDailyLLMCalls: 500,
maxMonthlyCost: 100,
features: {
contentGeneration: true,
sendTimeOptimization: true,
nlSegmentBuilder: true,
journeyOptimization: true,
personalizationEngine: false,
churnIntervention: true,
},
},
PREMIUM: {
preferredModel: 'CLAUDE_SONNET',
fallbackModel: 'CLAUDE_HAIKU',
maxDailyLLMCalls: 2000,
maxMonthlyCost: 500,
features: {
contentGeneration: true,
sendTimeOptimization: true,
nlSegmentBuilder: true,
journeyOptimization: true,
personalizationEngine: true,
churnIntervention: true,
},
},
};

15.2 Admin API

// Tenant-admin endpoints for AI configuration, usage, quality metrics and
// dry-run content testing. All routes require tenant-admin privileges
// (TenantAdminGuard).
@Controller('api/v1/admin/ai')
@UseGuards(TenantAdminGuard)
export class AIAdminController {
  /** Read the tenant's current AI configuration. */
  @Get('config')
  @ApiOperation({ summary: 'Get tenant AI configuration' })
  async getConfig(@TenantId() tenantId: string): Promise<TenantAIConfig> {
    return this.aiConfigService.getConfig(tenantId);
  }

  /** Partially update the tenant's AI configuration. */
  @Patch('config')
  @ApiOperation({ summary: 'Update tenant AI configuration' })
  async updateConfig(
    @TenantId() tenantId: string,
    @Body() dto: UpdateAIConfigDto
  ): Promise<TenantAIConfig> {
    return this.aiConfigService.updateConfig(tenantId, dto);
  }

  /** Usage statistics over a trailing window (defaults to 30 days). */
  @Get('usage')
  @ApiOperation({ summary: 'Get AI usage statistics' })
  async getUsage(
    @TenantId() tenantId: string,
    @Query('range') range: '7d' | '30d' | '90d' = '30d'
  ): Promise<AIUsageStats> {
    return this.aiUsageService.getStats(tenantId, range);
  }

  /** Quality dashboard data (see AIQualityMetrics). */
  @Get('quality')
  @ApiOperation({ summary: 'Get AI quality metrics' })
  async getQuality(
    @TenantId() tenantId: string
  ): Promise<AIQualityMetrics> {
    return this.aiQualityService.getMetrics(tenantId);
  }

  /** Dry-run content generation — nothing is sent to customers. */
  @Post('test')
  @ApiOperation({ summary: 'Test AI content generation' })
  async testGeneration(
    @TenantId() tenantId: string,
    @Body() dto: TestGenerationDto
  ): Promise<TestGenerationResult> {
    // Generate content without sending
    return this.contentService.testGeneration(tenantId, dto);
  }
}

16. Implementation Plan

Phase 1: Foundation (Weeks 1-2)

  • LLM client abstraction (Claude/GPT)
  • Content generation service (basic)
  • Cost tracking and rate limiting

Phase 2: Content Generation (Weeks 3-4)

  • Email subject line generation
  • Email body personalization
  • SMS/Push content generation
  • Integration with SPLIT step

Phase 3: Send Time Optimization (Weeks 5-6)

  • Build send time profiles from history
  • STO model training
  • Campaign scheduling with STO

Phase 4: Natural Language Segments (Weeks 7-8)

  • NL parsing service
  • Schema context builder
  • UI integration
  • Validation and preview

Phase 5: Journey Optimization (Weeks 9-10)

  • Journey analytics aggregation
  • Optimization suggestion engine
  • Auto-journey generation (beta)

Phase 6: Churn Interventions (Weeks 11-12)

  • Intervention recommendation engine
  • LLM intervention strategist
  • Effectiveness tracking
  • Closed-loop learning

17. Events

// Add to crm.events.ts

// Event names emitted by the AI marketing features, to be appended to the
// existing CrmEvent enum in crm.events.ts. Naming convention:
// crm.ai.<area>.<event>.
export enum CrmEvent {
  // ... existing events

  // AI Content events (section 1)
  AIContentGenerated = 'crm.ai.content.generated',
  AIContentSelected = 'crm.ai.content.selected',

  // Send-time-optimization events (section 3)
  STOProfileBuilt = 'crm.ai.sto.profile_built',
  STOTimeSelected = 'crm.ai.sto.time_selected',

  // Natural-language segment events (section 4)
  NLSegmentParsed = 'crm.ai.segment.parsed',
  NLSegmentCreated = 'crm.ai.segment.created',

  // Journey optimization events (section 5)
  JourneyOptimizationSuggested = 'crm.ai.journey.optimization_suggested',
  JourneyOptimizationApplied = 'crm.ai.journey.optimization_applied',

  // Churn intervention events (section 6)
  ChurnInterventionRecommended = 'crm.ai.churn.intervention_recommended',
  ChurnInterventionExecuted = 'crm.ai.churn.intervention_executed',
  ChurnInterventionMeasured = 'crm.ai.churn.intervention_measured',

  // Resilience events (section 12 — fallbacks and circuit breaker)
  AIFallbackUsed = 'crm.ai.fallback.used',
  AICircuitBreakerOpened = 'crm.ai.circuit_breaker.opened',
  AICircuitBreakerClosed = 'crm.ai.circuit_breaker.closed',

  // Human-in-the-loop events (section 13)
  ContentApprovalRequested = 'crm.ai.content.approval_requested',
  ContentApprovalCompleted = 'crm.ai.content.approval_completed',
  ContentApprovalExpired = 'crm.ai.content.approval_expired',
  InterventionApprovalRequested = 'crm.ai.intervention.approval_requested',

  // Cost monitoring events (section 14.2)
  AICostWarning = 'crm.ai.cost.warning',
  AICostCritical = 'crm.ai.cost.critical',
  AIBudgetWarning = 'crm.ai.budget.warning',
  AIThrottled = 'crm.ai.throttled',
  AIDisabled = 'crm.ai.disabled',

  // Configuration events (section 15)
  AIConfigUpdated = 'crm.ai.config.updated',
  AIFeatureToggled = 'crm.ai.feature.toggled',
}