Moderate Text
The Copyleaks Text Moderation API provides real-time content moderation capabilities to help you maintain safe and appropriate content across your platform. This API automatically scans and flags potentially harmful content across multiple categories, enabling you to take appropriate action to protect your users and maintain community standards.
Request
Path Parameters
A unique scan ID provided by you. We recommend you use the same ID in your database to represent the scan in the Copyleaks database; this will help you debug incidents. Using the same ID for the same file will help you avoid network problems that may lead to multiple scans for the same file. Learn more about the criteria for creating a Scan ID.
>= 3 characters
<= 36 characters
Match pattern: lowercase letters and digits (`a-z`, `0-9`) plus the symbols `! @ $ ^ & - + % = _ ( ) { } < > ' ; : / . " , ~`
Headers
Content-Type: application/json
Authorization: Bearer YOUR_LOGIN_TOKEN
Request Body
The request body is a JSON object containing the text to scan.
Text to produce Text Moderation report for.
>= 1 characters
<= 25000 characters
Use sandbox mode to test your integration with the Copyleaks API without consuming any credits.
Submit content for Text Moderation and get returned mock results, simulating Copyleaks’ API functionality to ensure you have successfully integrated the API.
This feature is intended to be used for development purposes only.
The language code of your content. The selected language should be on the Supported Languages list above. If the ‘language’ field is not specified, our system will automatically detect the language of the content.
Example: "en"
labels array[object]
A list of label configurations to be used for the moderation process.
Identifier for the label. List of moderation labels.
>= 1 characters
<= 32 characters
Responses
The scan was created.
Response Schema
The response contains the following fields:
moderations object
text object
chars object
scannedDocument object
Example Response
A typical response from this endpoint:
{ "modelVersion": "v1", "moderations": { "text": { "chars": { "labels": [ 4, 4, 4, 2, 7, 6 ], "starts": [ 15,// ... truncated
Examples
POST https://api.copyleaks.com/v1/text-moderation/my-scan-123/check
Content-Type: application/json
Authorization: Bearer YOUR_LOGIN_TOKEN
{ "text": "Your text content to be moderated goes here.", "sandbox": true, "labels": [ { "id": "toxic-v1" }, { "id": "profanity-v1" }, { "id": "hate-speech-v1" } ]}
curl --request POST \ --url https://api.copyleaks.com/v1/text-moderation/my-scan-123/check \ --header 'Authorization: Bearer YOUR_LOGIN_TOKEN' \ --header 'Content-Type: application/json' \ --data '{ "text": "Your text content to be moderated goes here.", "sandbox": true, "labels": [ { "id": "toxic-v1" }, { "id": "profanity-v1" }, { "id": "hate-speech-v1" } ] }'
import requests
# Sample text to moderate.
# NOTE: in the collapsed original, this assignment was fused onto the comment
# line, which would have left `sample_text` undefined at runtime.
sample_text = "Your text content to be moderated.goes here."

url = "https://api.copyleaks.com/v1/text-moderation/my-scan-123/check"

# Request body: sandbox mode returns mock results without consuming credits;
# 'labels' selects which moderation models to run.
payload = {
    "text": sample_text,
    "sandbox": True,
    "language": "en",
    "labels": [
        {"id": "other-v1"},
        {"id": "adult-v1"},
        {"id": "toxic-v1"},
        {"id": "violent-v1"},
        {"id": "profanity-v1"},
        {"id": "self-harm-v1"},
        {"id": "harassment-v1"},
        {"id": "hate-speech-v1"},
        {"id": "drugs-v1"},
        {"id": "firearms-v1"},
        {"id": "cybersecurity-v1"},
    ],
}
headers = {
    "Authorization": "Bearer YOUR_LOGIN_TOKEN",
    "Content-Type": "application/json",
    "Accept": "application/json",
}

response = requests.post(url, json=payload, headers=headers)
# Fail fast on HTTP errors instead of trying to parse an error body as JSON.
response.raise_for_status()
result = response.json()

print("Text Moderation Results:")
# NOTE(review): the example response nests labels under
# moderations.text.chars.labels; this top-level check mirrors the original
# sample — confirm against the actual response schema.
if 'labels' in result:
    for label in result['labels']:
        print(f"Label: {label.get('id')}, Score: {label.get('score', 'N/A')}")

print("Full response:", result)
const { Copyleaks, CopyleaksTextModerationRequestModel } = require('plagiarism-checker');

/**
 * Submits sample text to the Copyleaks Text Moderation endpoint and logs the result.
 * Sandbox mode is enabled, so no credits are consumed.
 */
async function moderateText() {
  try {
    // Initialize the Copyleaks SDK client.
    const copyleaks = new Copyleaks();

    // Login to get the authentication token object.
    // Replace with your email and API key.
    // FIX: the original passed `loginResult` to submitTextAsync without ever
    // defining it; the login call below was missing (ReferenceError).
    const loginResult = await copyleaks.loginAsync("YOUR_EMAIL", "YOUR_API_KEY");

    const scanId = "my-scan-123";

    // The text to be moderated.
    const textToModerate = "Your text content to be moderated goes here.";

    // Create a submission model; `labels` selects the moderation models to run.
    const submission = new CopyleaksTextModerationRequestModel({
      text: textToModerate,
      sandbox: true,
      language: "en",
      labels: [
        { id: "toxic-v1" },
        { id: "profanity-v1" },
        { id: "hate-speech-v1" },
      ],
    });

    // Submit the text for moderation.
    const response = await copyleaks.textModerationClient.submitTextAsync(loginResult, scanId, submission);
    console.log("Moderation results:", response);
  } catch (error) {
    console.error("An error occurred:", error);
  }
}

moderateText();
import classes.Copyleaks;import models.request.TextModeration.CopyleaksTextModerationRequest;import models.request.TextModeration.Label;import models.response.textModeration.CopyleaksTextModerationResponseModel;
public class TextModerationExample { private static final String API_KEY = "00000000-0000-0000-0000-000000000000";
public static void main(String[] args) { try { // Login to Copyleaks String authToken = Copyleaks.login(EMAIL_ADDRESS, API_KEY); System.out.println("Logged successfully!\nToken: " + authToken);
// Sample text to moderate String sampleText = "Your text content to be moderated goes here.";
// Create text moderation request with all available labels String scanId = "my-scan-123"; CopyleaksTextModerationRequest moderationRequest = new CopyleaksTextModerationRequest( sampleText, true, // sandbox "en", // language new Label[] { new Label("other-v1"), new Label("adult-v1"), new Label("toxic-v1"), new Label("violent-v1"), new Label("profanity-v1"), new Label("self-harm-v1"), new Label("harassment-v1"), new Label("hate-speech-v1"), new Label("drugs-v1"), new Label("firearms-v1"), new Label("cybersecurity-v1") } );
// Submit for text moderation analysis CopyleaksTextModerationResponseModel result = Copyleaks.textModerationClient.submitText( authToken, scanId, moderationRequest );
System.out.println("Text moderation submitted successfully!"); System.out.println("Moderation Results:");
if (result.getLabels() != null) { for (var label : result.getLabels()) { System.out.println("Label: " + label.getId() + ", Score: " + label.getScore()); } }
System.out.println("Full result: " + result);
} catch (Exception e) { System.out.println("Failed: " + e.getMessage()); e.printStackTrace(); } }}