Mirror of https://github.com/deepseek-ai/awesome-deepseek-integration.git (synced 2025-06-28 04:21:50 -04:00)
This commit introduces a new project, `deepseek_content_moderation`, designed to detect sensitive content in text based on configurable keyword lists.

Key features include:
- Customizable categories of sensitive words stored in `config.json`.
- A `Moderator` class (`moderator.py`) that loads the configuration and uses regex for case-insensitive, whole-word matching.
- The `analyze_text` method returns a dictionary of triggered categories and the specific words found.
- Comprehensive unit tests (`tests/test_moderator.py`) using pytest ensure the functionality of the `Moderator` class.
- A detailed `README.md` provides an overview, setup instructions, usage examples, and testing guidelines.

The project structure has been set up to be a valid Python package, with the main directory named `deepseek_content_moderation`. This project serves as a foundational component for applications requiring basic content filtering capabilities.
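The `moderator.py` module itself is not shown on this page, so the following is only a minimal sketch of how the described behaviour might work, assuming the `Moderator` class name and `analyze_text` method from the commit message; the constructor signature, config path default, and exact regex construction are illustrative assumptions, not the project's actual code.

```python
import json
import re
from pathlib import Path


class Moderator:
    """Loads keyword categories from config.json and flags matches in text."""

    def __init__(self, config_path: str = "config.json"):
        # Each top-level key in config.json maps a category to a list of keywords.
        self.categories = json.loads(Path(config_path).read_text(encoding="utf-8"))
        # Pre-compile one case-insensitive, whole-word pattern per category.
        self.patterns = {
            category: re.compile(
                r"\b(" + "|".join(re.escape(word) for word in words) + r")\b",
                re.IGNORECASE,
            )
            for category, words in self.categories.items()
            if words
        }

    def analyze_text(self, text: str) -> dict:
        # Return {category: [matched words]} for every category that triggers.
        findings = {}
        for category, pattern in self.patterns.items():
            matches = pattern.findall(text)
            if matches:
                findings[category] = sorted({m.lower() for m in matches})
        return findings
```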
58 lines
1.2 KiB
JSON
{
  "Profanity": [
    "swearword1",
    "vulgarterm1",
    "explicitlang1"
  ],
  "HateSpeech": [
    "hatespeech_slur1",
    "derogatory_term1",
    "incitement_example1"
  ],
  "DiscriminatoryLanguage": [
    "stereotype_example1",
    "biased_phrase1",
    "microaggression_example1"
  ],
  "SexuallyExplicitLanguage": [
    "sexual_act_description1",
    "explicit_anatomical_term1",
    "suggestive_innuendo1"
  ],
  "ViolenceGore": [
    "graphic_violence_desc1",
    "torture_example1",
    "weapon_for_harm1"
  ],
  "SelfHarmSuicide": [
    "selfharm_method1",
    "suicidal_ideation_phrase1",
    "encouragement_selfharm1"
  ],
  "IllegalActivities": [
    "drug_use_term1",
    "illegal_weapon_term1",
    "terrorism_related_term1"
  ],
  "BlasphemyReligiousInsults": [
    "religious_insult1",
    "disrespectful_term_religion1",
    "offensive_to_belief1"
  ],
  "MedicalMisinformation": [
    "unproven_medical_advice1",
    "dangerous_health_claim1",
    "harmful_pseudo_treatment1"
  ],
  "PrivacyViolatingPII": [
    "personal_name_example",
    "address_example_term",
    "phone_number_example_term"
  ],
  "OffensiveSlangCulturalInsults": [
    "cultural_slang_insult1",
    "derogatory_cultural_term1",
    "offensive_local_slang1"
  ]
}
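The commit message mentions pytest tests in `tests/test_moderator.py` but does not show them; a check against this config could look like the sketch below, assuming behaviour like the `Moderator` sketch above. The import path and config location are assumptions based on the stated package layout, not the actual test file.

```python
from deepseek_content_moderation.moderator import Moderator

# Assumed location of the config shown above within the package.
CONFIG_PATH = "deepseek_content_moderation/config.json"


def test_profanity_whole_word_match():
    moderator = Moderator(CONFIG_PATH)
    result = moderator.analyze_text("This contains swearword1, clearly.")
    assert result == {"Profanity": ["swearword1"]}


def test_clean_text_returns_empty_dict():
    moderator = Moderator(CONFIG_PATH)
    assert moderator.analyze_text("A perfectly harmless sentence.") == {}
```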