Text Generation
This guide walks you through generating text content using the neuroflash API — from selecting a model to streaming responses and applying a brand voice.
What you'll build
By the end of this guide, you will:
- Authenticate with the neuroflash API
- Fetch your workspace
- List available AI models
- Send a chat completion request
- Stream a response in real-time
- Use multi-turn messages for context
- Apply a brand voice for consistent tone
Prerequisites
- A neuroflash account with API access
- Your `client_id` and `client_secret` (see Authentication)
Step 1: Authenticate
- cURL
- Python
- Node.js
- Go
curl -X POST https://id.neuroflash.com/oauth/v2/token \
-H "Content-Type: application/x-www-form-urlencoded" \
-d "grant_type=client_credentials" \
-d "client_id=YOUR_CLIENT_ID" \
-d "client_secret=YOUR_CLIENT_SECRET" \
-d "scope=openid"
import requests
BASE_URL = "https://app.neuroflash.com/api"
token = requests.post(
"https://id.neuroflash.com/oauth/v2/token",
data={
"grant_type": "client_credentials",
"client_id": "YOUR_CLIENT_ID",
"client_secret": "YOUR_CLIENT_SECRET",
"scope": "openid",
},
).json()["access_token"]
headers = {"Authorization": f"Bearer {token}"}
const BASE_URL = "https://app.neuroflash.com/api";
const { access_token } = await fetch(
"https://id.neuroflash.com/oauth/v2/token",
{
method: "POST",
headers: { "Content-Type": "application/x-www-form-urlencoded" },
body: new URLSearchParams({
grant_type: "client_credentials",
client_id: "YOUR_CLIENT_ID",
client_secret: "YOUR_CLIENT_SECRET",
scope: "openid",
}),
}
).then((r) => r.json());
const headers = { Authorization: `Bearer ${access_token}` };
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"strings"
)

const baseURL = "https://app.neuroflash.com/api"

// main authenticates against the neuroflash OAuth2 token endpoint using the
// client-credentials grant and prints a confirmation once a token is obtained.
func main() {
	// Form-encoded body for the client-credentials grant.
	data := url.Values{
		"grant_type":    {"client_credentials"},
		"client_id":     {"YOUR_CLIENT_ID"},
		"client_secret": {"YOUR_CLIENT_SECRET"},
		"scope":         {"openid"},
	}
	resp, err := http.Post(
		"https://id.neuroflash.com/oauth/v2/token",
		"application/x-www-form-urlencoded",
		strings.NewReader(data.Encode()),
	)
	if err != nil {
		log.Fatalf("requesting token: %v", err)
	}
	defer resp.Body.Close()

	// Only the access_token field is needed from the token response.
	var authResult struct {
		AccessToken string `json:"access_token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&authResult); err != nil {
		log.Fatalf("decoding token response: %v", err)
	}
	token := authResult.AccessToken
	// Use the token in the output so the variable is consumed; the original
	// example left it unused, which is a compile error in Go.
	fmt.Printf("Authenticated (token length: %d)\n", len(token))
}
Step 2: Get Your Workspace
Fetch your available workspaces and select the first one. The workspace ID is required for most API calls:
- cURL
- Python
- Node.js
- Go
curl "https://app.neuroflash.com/api/workspace-service/v1/workspaces" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN"
workspaces = requests.get(
f"{BASE_URL}/workspace-service/v1/workspaces",
headers=headers,
).json()
workspace_id = workspaces["_embedded"]["workspaces"][0]["id"]
headers["x-workspace-id"] = workspace_id
print(f"Using workspace: {workspace_id}")
const workspaces = await fetch(
`${BASE_URL}/workspace-service/v1/workspaces`,
{ headers }
).then((r) => r.json());
const workspaceId = workspaces._embedded.workspaces[0].id;
headers["x-workspace-id"] = workspaceId;
console.log(`Using workspace: ${workspaceId}`);
req, _ := http.NewRequest("GET", baseURL+"/workspace-service/v1/workspaces", nil)
req.Header.Set("Authorization", "Bearer "+token)
resp, _ := http.DefaultClient.Do(req)
defer resp.Body.Close()
var wsResult struct {
Embedded struct {
Workspaces []struct {
ID string `json:"id"`
} `json:"workspaces"`
} `json:"_embedded"`
}
json.NewDecoder(resp.Body).Decode(&wsResult)
workspaceID := wsResult.Embedded.Workspaces[0].ID
fmt.Printf("Using workspace: %s\n", workspaceID)
Step 3: List Available Models
See which AI models are available for your pricing plan:
- cURL
- Python
- Node.js
- Go
curl "https://app.neuroflash.com/api/ds-prototypes/model_selection/models" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN" \
-H "x-workspace-id: YOUR_WORKSPACE_ID"
models = requests.get(
f"{BASE_URL}/ds-prototypes/model_selection/models",
headers=headers,
).json()
for model in models:
status = "available" if model["available"] else "upgrade required"
print(f"{model['id']} ({model['provider']}) — {status}")
const models = await fetch(
`${BASE_URL}/ds-prototypes/model_selection/models`,
{ headers }
).then((r) => r.json());
models.forEach((m) => {
const status = m.available ? "available" : "upgrade required";
console.log(`${m.id} (${m.provider}) — ${status}`);
});
req, _ := http.NewRequest("GET", baseURL+"/ds-prototypes/model_selection/models", nil)
req.Header.Set("Authorization", "Bearer "+token)
req.Header.Set("x-workspace-id", workspaceID)
resp, _ := http.DefaultClient.Do(req)
defer resp.Body.Close()
var models []struct {
ID string `json:"id"`
Provider string `json:"provider"`
Available bool `json:"available"`
}
json.NewDecoder(resp.Body).Decode(&models)
for _, m := range models {
status := "upgrade required"
if m.Available {
status = "available"
}
fmt.Printf("%s (%s) — %s\n", m.ID, m.Provider, status)
}
Response:
[
{
"id": "gpt-4.1-mini",
"name": "GPT-4.1 Mini",
"provider": "openai",
"description": "Fast and cost-effective model for most tasks",
"context_window": 128000,
"available": true,
"reasoning_model": false
},
{
"id": "claude-sonnet-4",
"name": "Claude Sonnet 4",
"provider": "anthropic",
"description": "Excellent for nuanced writing and analysis",
"context_window": 200000,
"available": true,
"reasoning_model": false
},
{
"id": "gpt-5",
"name": "GPT-5",
"provider": "openai",
"description": "Most capable OpenAI model with reasoning",
"context_window": 128000,
"available": true,
"reasoning_model": true
}
]
Model availability by plan:
| Plan | Models |
|---|---|
| Free | gpt-4.1-mini, mistral-7b-instruct, mistral-medium-3.1, claude-sonnet-4, gemini-2.5-flash |
| Pro | All Free models + gpt-5, gpt-4.1, gemini-2.5-pro |
| Business | All Pro models + claude-opus-4.1 |
Step 4: Send a Chat Completion
Send a message and get a response using the content generation endpoint:
- cURL
- Python
- Node.js
- Go
curl -X POST "https://app.neuroflash.com/api/ds-prototypes/content_generation/chat/completions" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN" \
-H "x-workspace-id: YOUR_WORKSPACE_ID" \
-H "Content-Type: application/json" \
-d '{
"model": "gpt-4.1-mini",
"messages": [
{
"role": "user",
"content": "Write an outline for a blog post about how small businesses can adopt sustainable technology practices."
}
],
"temperature": 0.7
}'
completion = requests.post(
f"{BASE_URL}/ds-prototypes/content_generation/chat/completions",
headers=headers,
json={
"model": "gpt-4.1-mini",
"messages": [
{
"role": "user",
"content": "Write an outline for a blog post about how small businesses can adopt sustainable technology practices.",
}
],
"temperature": 0.7,
},
).json()
content = completion["choices"][0]["message"]["content"]
print(content)
print(f"\nTokens used: {completion['usage']['total_tokens']}")
const completion = await fetch(
`${BASE_URL}/ds-prototypes/content_generation/chat/completions`,
{
method: "POST",
headers: { ...headers, "Content-Type": "application/json" },
body: JSON.stringify({
model: "gpt-4.1-mini",
messages: [
{
role: "user",
content:
"Write an outline for a blog post about how small businesses can adopt sustainable technology practices.",
},
],
temperature: 0.7,
}),
}
).then((r) => r.json());
console.log(completion.choices[0].message.content);
console.log(`\nTokens used: ${completion.usage.total_tokens}`);
body, _ := json.Marshal(map[string]any{
"model": "gpt-4.1-mini",
"messages": []map[string]string{
{"role": "user", "content": "Write an outline for a blog post about how small businesses can adopt sustainable technology practices."},
},
"temperature": 0.7,
})
req, _ := http.NewRequest("POST", baseURL+"/ds-prototypes/content_generation/chat/completions", bytes.NewReader(body))
req.Header.Set("Authorization", "Bearer "+token)
req.Header.Set("x-workspace-id", workspaceID)
req.Header.Set("Content-Type", "application/json")
resp, _ := http.DefaultClient.Do(req)
defer resp.Body.Close()
var completion struct {
Choices []struct {
Message struct {
Content string `json:"content"`
} `json:"message"`
} `json:"choices"`
Usage struct {
TotalTokens int `json:"total_tokens"`
} `json:"usage"`
}
json.NewDecoder(resp.Body).Decode(&completion)
fmt.Println(completion.Choices[0].Message.Content)
fmt.Printf("\nTokens used: %d\n", completion.Usage.TotalTokens)
Response:
{
"id": "chatcmpl-abc123",
"object": "chat.completion",
"model": "gpt-4.1-mini",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "# Blog Post Outline: Sustainable Tech for Small Businesses\n\n## 1. Introduction\n- The growing importance of sustainability...\n\n## 2. Start with Energy Efficiency\n..."
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 28,
"completion_tokens": 245,
"total_tokens": 273,
"words_used": 364
}
}
Step 5: Stream a Response
For longer responses, use streaming to receive chunks in real-time via Server-Sent Events (SSE):
- cURL
- Python
- Node.js
- Go
curl -X POST "https://app.neuroflash.com/api/ds-prototypes/content_generation/chat/completions" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN" \
-H "x-workspace-id: YOUR_WORKSPACE_ID" \
-H "Content-Type: application/json" \
-d '{
"model": "gpt-4.1-mini",
"messages": [
{
"role": "user",
"content": "Write a short paragraph about renewable energy benefits."
}
],
"stream": true
}'
response = requests.post(
f"{BASE_URL}/ds-prototypes/content_generation/chat/completions",
headers=headers,
json={
"model": "gpt-4.1-mini",
"messages": [
{"role": "user", "content": "Write a short paragraph about renewable energy benefits."}
],
"stream": True,
},
stream=True,
)
for line in response.iter_lines():
if line:
line = line.decode("utf-8")
if line.startswith("data: ") and line != "data: [DONE]":
import json
chunk = json.loads(line[6:])
delta = chunk["choices"][0].get("delta", {})
if "content" in delta:
print(delta["content"], end="", flush=True)
print() # Newline at the end
const response = await fetch(
`${BASE_URL}/ds-prototypes/content_generation/chat/completions`,
{
method: "POST",
headers: { ...headers, "Content-Type": "application/json" },
body: JSON.stringify({
model: "gpt-4.1-mini",
messages: [{ role: "user", content: "Write a short paragraph about renewable energy benefits." }],
stream: true,
}),
}
);
const reader = response.body.getReader();
const decoder = new TextDecoder();
while (true) {
const { done, value } = await reader.read();
if (done) break;
const text = decoder.decode(value);
for (const line of text.split("\n")) {
if (line.startsWith("data: ") && line !== "data: [DONE]") {
const chunk = JSON.parse(line.slice(6));
const content = chunk.choices[0]?.delta?.content;
if (content) process.stdout.write(content);
}
}
}
console.log();
body, _ := json.Marshal(map[string]any{
"model": "gpt-4.1-mini",
"messages": []map[string]string{
{"role": "user", "content": "Write a short paragraph about renewable energy benefits."},
},
"stream": true,
})
req, _ := http.NewRequest("POST", baseURL+"/ds-prototypes/content_generation/chat/completions", bytes.NewReader(body))
req.Header.Set("Authorization", "Bearer "+token)
req.Header.Set("x-workspace-id", workspaceID)
req.Header.Set("Content-Type", "application/json")
resp, _ := http.DefaultClient.Do(req)
defer resp.Body.Close()
scanner := bufio.NewScanner(resp.Body)
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, "data: ") && line != "data: [DONE]" {
var chunk struct {
Choices []struct {
Delta struct {
Content string `json:"content"`
} `json:"delta"`
} `json:"choices"`
}
json.Unmarshal([]byte(line[6:]), &chunk)
if len(chunk.Choices) > 0 {
fmt.Print(chunk.Choices[0].Delta.Content)
}
}
}
fmt.Println()
SSE Stream Format:
data: {"id":"chatcmpl-abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}
data: {"id":"chatcmpl-abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"In"},"finish_reason":null}]}
data: {"id":"chatcmpl-abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":" today's"},"finish_reason":null}]}
...
data: {"id":"chatcmpl-abc","object":"chat.completion.chunk","choices":[{"index":0,"delta":{},"finish_reason":"stop"}],"usage":{"prompt_tokens":15,"completion_tokens":150,"total_tokens":165}}
data: [DONE]
Step 6: Multi-turn Messages
Include previous messages for context in follow-up requests. Pass the full message history in each request:
- cURL
- Python
- Node.js
- Go
curl -X POST "https://app.neuroflash.com/api/ds-prototypes/content_generation/chat/completions" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN" \
-H "x-workspace-id: YOUR_WORKSPACE_ID" \
-H "Content-Type: application/json" \
-d '{
"model": "gpt-4.1-mini",
"messages": [
{"role": "user", "content": "Write an outline for a blog post about sustainable technology."},
{"role": "assistant", "content": "# Sustainable Technology Blog Post\n\n1. Introduction..."},
{"role": "user", "content": "Make it more conversational and add a real-world example."}
]
}'
followup = requests.post(
f"{BASE_URL}/ds-prototypes/content_generation/chat/completions",
headers=headers,
json={
"model": "gpt-4.1-mini",
"messages": [
{"role": "user", "content": "Write an outline for a blog post about sustainable technology."},
{"role": "assistant", "content": "# Sustainable Technology Blog Post\n\n1. Introduction..."},
{"role": "user", "content": "Make it more conversational and add a real-world example."},
],
},
).json()
print(followup["choices"][0]["message"]["content"])
const followup = await fetch(
`${BASE_URL}/ds-prototypes/content_generation/chat/completions`,
{
method: "POST",
headers: { ...headers, "Content-Type": "application/json" },
body: JSON.stringify({
model: "gpt-4.1-mini",
messages: [
{ role: "user", content: "Write an outline for a blog post about sustainable technology." },
{ role: "assistant", content: "# Sustainable Technology Blog Post\n\n1. Introduction..." },
{
role: "user",
content: "Make it more conversational and add a real-world example.",
},
],
}),
}
).then((r) => r.json());
console.log(followup.choices[0].message.content);
body, _ := json.Marshal(map[string]any{
"model": "gpt-4.1-mini",
"messages": []map[string]string{
{"role": "user", "content": "Write an outline for a blog post about sustainable technology."},
{"role": "assistant", "content": "# Sustainable Technology Blog Post\n\n1. Introduction..."},
{"role": "user", "content": "Make it more conversational and add a real-world example."},
},
})
req, _ := http.NewRequest("POST", baseURL+"/ds-prototypes/content_generation/chat/completions", bytes.NewReader(body))
req.Header.Set("Authorization", "Bearer "+token)
req.Header.Set("x-workspace-id", workspaceID)
req.Header.Set("Content-Type", "application/json")
resp, _ := http.DefaultClient.Do(req)
defer resp.Body.Close()
var followup struct {
Choices []struct {
Message struct {
Content string `json:"content"`
} `json:"message"`
} `json:"choices"`
}
json.NewDecoder(resp.Body).Decode(&followup)
fmt.Println(followup.Choices[0].Message.Content)
Step 7: Apply a Brand Voice
Fetch a brand voice from your workspace and use its description as a system message for consistent tone:
- cURL
- Python
- Node.js
- Go
# List brand voices
curl "https://app.neuroflash.com/api/brand-voice-service/v1/workspaces/{workspace_id}/brand-voices?page=1&size=10" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN" \
-H "x-workspace-id: YOUR_WORKSPACE_ID"
# Use the brand voice description as a system message
curl -X POST "https://app.neuroflash.com/api/ds-prototypes/content_generation/chat/completions" \
-H "Authorization: Bearer YOUR_ACCESS_TOKEN" \
-H "x-workspace-id: YOUR_WORKSPACE_ID" \
-H "Content-Type: application/json" \
-d '{
"model": "gpt-4.1-mini",
"messages": [
{"role": "system", "content": "Follow this brand voice: YOUR_BRAND_VOICE_DESCRIPTION"},
{"role": "user", "content": "Write a product announcement for our new eco-friendly packaging."}
]
}'
# List brand voices
brand_voices = requests.get(
f"{BASE_URL}/brand-voice-service/v1/workspaces/{workspace_id}/brand-voices",
headers=headers,
params={"page": 1, "size": 10},
).json()
brand_voice = brand_voices["data"][0]
system_prompt = f"Follow this brand voice: {brand_voice['description']}"
# Use it as a system message
completion = requests.post(
f"{BASE_URL}/ds-prototypes/content_generation/chat/completions",
headers=headers,
json={
"model": "gpt-4.1-mini",
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": "Write a product announcement for our new eco-friendly packaging."},
],
},
).json()
print(completion["choices"][0]["message"]["content"])
// List brand voices
const brandVoices = await fetch(
`${BASE_URL}/brand-voice-service/v1/workspaces/${workspaceId}/brand-voices?page=1&size=10`,
{ headers }
).then((r) => r.json());
const brandVoice = brandVoices.data[0];
const systemPrompt = `Follow this brand voice: ${brandVoice.description}`;
// Use it as a system message
const completion = await fetch(
`${BASE_URL}/ds-prototypes/content_generation/chat/completions`,
{
method: "POST",
headers: { ...headers, "Content-Type": "application/json" },
body: JSON.stringify({
model: "gpt-4.1-mini",
messages: [
{ role: "system", content: systemPrompt },
{
role: "user",
content: "Write a product announcement for our new eco-friendly packaging.",
},
],
}),
}
).then((r) => r.json());
console.log(completion.choices[0].message.content);
// List brand voices
bvURL := fmt.Sprintf("%s/brand-voice-service/v1/workspaces/%s/brand-voices?page=1&size=10", baseURL, workspaceID)
req, _ := http.NewRequest("GET", bvURL, nil)
req.Header.Set("Authorization", "Bearer "+token)
req.Header.Set("x-workspace-id", workspaceID)
resp, _ := http.DefaultClient.Do(req)
defer resp.Body.Close()
var brandVoices struct {
Data []struct {
ID string `json:"id"`
Description string `json:"description"`
} `json:"data"`
}
json.NewDecoder(resp.Body).Decode(&brandVoices)
systemPrompt := fmt.Sprintf("Follow this brand voice: %s", brandVoices.Data[0].Description)
// Use it as a system message
body, _ := json.Marshal(map[string]any{
"model": "gpt-4.1-mini",
"messages": []map[string]string{
{"role": "system", "content": systemPrompt},
{"role": "user", "content": "Write a product announcement for our new eco-friendly packaging."},
},
})
req, _ = http.NewRequest("POST", baseURL+"/ds-prototypes/content_generation/chat/completions", bytes.NewReader(body))
req.Header.Set("Authorization", "Bearer "+token)
req.Header.Set("x-workspace-id", workspaceID)
req.Header.Set("Content-Type", "application/json")
resp, _ = http.DefaultClient.Do(req)
defer resp.Body.Close()
var completion struct {
Choices []struct {
Message struct {
Content string `json:"content"`
} `json:"message"`
} `json:"choices"`
}
json.NewDecoder(resp.Body).Decode(&completion)
fmt.Println(completion.Choices[0].Message.Content)
Next Steps
- Explore all available models
- Learn about chat completions parameters
- Set up brand voices for your organization