[[["Easy to understand","easyToUnderstand","thumb-up"],["Solved my problem","solvedMyProblem","thumb-up"],["Other","otherUp","thumb-up"]],[["Hard to understand","hardToUnderstand","thumb-down"],["Incorrect information or sample code","incorrectInformationOrSampleCode","thumb-down"],["Missing the information/samples I need","missingTheInformationSamplesINeed","thumb-down"],["Other","otherDown","thumb-down"]],["Last updated 2025-04-09 UTC."],[[["This documentation covers the `SafetySetting.Types.HarmBlockMethod` enum within the Google Cloud AI Platform v1beta1 API."],["The `HarmBlockMethod` enum specifies how to block content based on harmful content detection."],["There are three possible methods: `Probability`, which uses probability scores, `Severity`, which uses both probability and severity scores, and `Unspecified`, which indicates an unspecified method."],["The namespace in this documentation is `Google.Cloud.AIPlatform.V1Beta1`."]]],[]]