{ "type": "REST", "operations": [ { "name": "Validate", "key": "validate", "inputParameters": [ { "name": "text", "displayName": "Content to check and sanitize", "type": "string", "required": true, "bodyLocation": "prompt" } ], "outputParameters": [ { "name": "passed", "displayName": "Passed", "type": "boolean", "applyMappingConfig": "always" }, { "name": "reason", "displayName": "Reason", "type": "string", "applyMappingConfig": "always" }, { "name": "sanitizedContent", "displayName": "Sanitized Content", "type": "string", "applyMappingConfig": "always" } ], "config": { "method": "POST", "url": "analyze/prompt", "httpResponseHandler": { "type": "script", "implementation": { "language": "javascript", "script": "var statusCode = flwHttpResponse.getStatusCode();\n\nif(statusCode === 401 || statusCode === 403) {\n throw 'LLM Guard authentication failed (HTTP ' + statusCode + '). Verify the auth token is configured correctly.';\n}\n\nif(statusCode < 200 || statusCode >= 300) {\n throw 'LLM Guard API returned HTTP ' + statusCode + ': ' + flwHttpResponse.getBody();\n}\n\nvar json = flw.json.stringToJson(flwHttpResponse.getBody());\nvar result = flw.json.createObject();\nvar isValid = json.path('is_valid').asBoolean();\nvar sanitizedPromptNode = json.path('sanitized_prompt');\nvar hasSanitized = sanitizedPromptNode.isValue() && flw.string.hasText(sanitizedPromptNode.asString());\n\nif(!isValid && hasSanitized) {\n // Content had PII but was sanitized \u2014 pass with rewritten content\n result.putBoolean('passed', true);\n result.putString('reason', '');\n result.putString('sanitizedContent', sanitizedPromptNode.asString());\n} else if(!isValid) {\n // Content failed validation and can't be sanitized (e.g., toxicity, prompt injection)\n result.putBoolean('passed', false);\n var scanners = json.path('scanners');\n var failedScanners = [];\n var scannerNames = scanners.fieldNames();\n for (var i = 0; i < scannerNames.size(); i++) {\n var scannerName = scannerNames.get(i);\n var score = scanners.path(scannerName).asDouble();\n if (score > 0.5) {\n failedScanners.push(scannerName + ' (score: ' + score + ')');\n }\n }\n result.putString('reason', 'LLM Guard flagged: ' + failedScanners.join(', '));\n result.putString('sanitizedContent', '');\n} else {\n result.putBoolean('passed', true);\n result.putString('reason', '');\n result.putString('sanitizedContent', '');\n}\n\nflwHttpResponse.setBody(flw.json.jsonToString(result));" } } }, "type": "search" } ], "config": { "baseUrl": "http://localhost:8000", "authorization": { "type": "bearer", "value": "my-llm-guard-token" } }, "name": "LLM Guard Guardrail (with sanitization)", "key": "llmGuardWithSanitizeGuardrail", "description": "Content scanning and sanitization using LLM Guard (Protect AI). Detects prompt injection, toxicity, PII, and more. Sanitizes content by replacing detected PII with placeholders before the LLM call." }