Tasker v6.6.3 - RequestRecognize$RecognitionConfig.java Source Code

Currently viewing: the Java source file RequestRecognize$RecognitionConfig.java from the Tasker v6.6.3 app

This page shows a Java source file produced by decompilation, with syntax highlighting. It is provided solely for security research and technical analysis; any illegal use is strictly prohibited, and applicable laws and regulations must be observed.


package com.joaomgcd.taskerm.google.speechtotext;

import androidx.annotation.Keep;
import ck.a;
import ck.b;
import java.util.HashMap;
import jk.h;

@Keep
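// Decompiled Kotlin data holder. Its fields appear to mirror the RecognitionConfig
// message of the Google Cloud Speech-to-Text v1 API: every field is nullable, and the
// synthetic constructor at the bottom of the class (the one taking an `int` bitmask
// and an `h` marker) substitutes null for any argument the caller omitted. The
// `$stable` constant is likely emitted by the Jetpack Compose compiler plugin.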
public final class RequestRecognize$RecognitionConfig {
    public static final int $stable = 8;
    private final Integer audioChannelCount;
    private final Boolean enableAutomaticPunctuation;
    private final Boolean enableSeparateRecognitionPerChannel;
    private final Boolean enableWordTimeOffsets;
    private final Encoding encoding;
    private final String languageCode;
    private final Integer maxAlternatives;
    private final RecognitionMetadata metadata;
    private final String model;
    private final Boolean profanityFilter;
    private final Integer sampleRateHertz;
    private final SpeechContext[] speechContexts;
    private final Boolean useEnhanced;

    @Keep
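    // Decompiled Kotlin enum: the synthetic $VALUES/$ENTRIES members and the obfuscated
    // `a`/`b` helpers (kotlin.enums.EnumEntries machinery) are decompilation artifacts.
    // The constants correspond to the v1 API's AudioEncoding values.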
    public static final class Encoding {
        private static final a $ENTRIES;
        private static final Encoding[] $VALUES;
        private final String description;
        public static final Encoding ENCODING_UNSPECIFIED = new Encoding("ENCODING_UNSPECIFIED", 0, "Not specified.");
        public static final Encoding LINEAR16 = new Encoding("LINEAR16", 1, "Uncompressed 16-bit signed little-endian samples (Linear PCM).");
        public static final Encoding FLAC = new Encoding("FLAC", 2, "`FLAC` (Free Lossless Audio Codec) is the recommended encoding because it is lossless--therefore recognition is not compromised--and requires only about half the bandwidth of `LINEAR16`. `FLAC` stream encoding supports 16-bit and 24-bit samples; however, not all fields in `STREAMINFO` are supported.");
        public static final Encoding MULAW = new Encoding("MULAW", 3, "8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.");
        public static final Encoding AMR = new Encoding("AMR", 4, "Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.");
        public static final Encoding AMR_WB = new Encoding("AMR_WB", 5, "Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.");
        public static final Encoding OGG_OPUS = new Encoding("OGG_OPUS", 6, "Opus encoded audio frames in Ogg container ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.");
        public static final Encoding SPEEX_WITH_HEADER_BYTE = new Encoding("SPEEX_WITH_HEADER_BYTE", 7, "Although the use of lossy encodings is not recommended, if a very low bitrate encoding is required, `OGG_OPUS` is highly preferred over Speex encoding. The [Speex](https://speex.org/) encoding supported by Cloud Speech API has a header byte in each block, as in MIME type `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex encoding defined in [RFC 5574](https://tools.ietf.org/html/rfc5574). The stream is a sequence of blocks, one block per RTP packet. Each block starts with a byte containing the length of the block, in bytes, followed by one or more frames of Speex data, padded to an integral number of bytes (octets) as specified in RFC 5574. In other words, each RTP header is replaced with a single byte containing the block length. Only Speex wideband is supported. `sample_rate_hertz` must be 16000.");

        private static final Encoding[] $values() {
            return new Encoding[]{ENCODING_UNSPECIFIED, LINEAR16, FLAC, MULAW, AMR, AMR_WB, OGG_OPUS, SPEEX_WITH_HEADER_BYTE};
        }

        static {
            Encoding[] $values = $values();
            $VALUES = $values;
            $ENTRIES = b.a($values);
        }

        private Encoding(String str, int i, String str2) {
            this.description = str2;
        }

        public static a<Encoding> getEntries() {
            return $ENTRIES;
        }

        public static Encoding valueOf(String str) {
            return (Encoding) Enum.valueOf(Encoding.class, str);
        }

        public static Encoding[] values() {
            return (Encoding[]) $VALUES.clone();
        }

        public final String getDescription() {
            return this.description;
        }
    }

    @Keep
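    // Decompiled Kotlin enum; corresponds to RecognitionMetadata.InteractionType in the
    // v1 API, a hint describing the use case of the submitted audio.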
    public static final class InteractionType {
        private static final a $ENTRIES;
        private static final InteractionType[] $VALUES;
        private final String description;
        public static final InteractionType INTERACTION_TYPE_UNSPECIFIED = new InteractionType("INTERACTION_TYPE_UNSPECIFIED", 0, "Use case is either unknown or is something other than one of the other values below.");
        public static final InteractionType DISCUSSION = new InteractionType("DISCUSSION", 1, "Multiple people in a conversation or discussion. For example, in a meeting with two or more people actively participating. Typically all the primary people speaking would be in the same room (if not, see PHONE_CALL).");
        public static final InteractionType PRESENTATION = new InteractionType("PRESENTATION", 2, "One or more persons lecturing or presenting to others, mostly uninterrupted.");
        public static final InteractionType PHONE_CALL = new InteractionType("PHONE_CALL", 3, "A phone-call or video-conference in which two or more people, who are not in the same room, are actively participating.");
        public static final InteractionType VOICEMAIL = new InteractionType("VOICEMAIL", 4, "A recorded message intended for another person to listen to.");
        public static final InteractionType PROFESSIONALLY_PRODUCED = new InteractionType("PROFESSIONALLY_PRODUCED", 5, "Professionally produced audio (e.g. TV Show, Podcast).");
        public static final InteractionType VOICE_SEARCH = new InteractionType("VOICE_SEARCH", 6, "Transcribe spoken questions and queries into text.");
        public static final InteractionType VOICE_COMMAND = new InteractionType("VOICE_COMMAND", 7, "Transcribe voice commands, such as for controlling a device.");
        public static final InteractionType DICTATION = new InteractionType("DICTATION", 8, "Transcribe speech to text to create a written document, such as a text-message, email or report.");

        private static final InteractionType[] $values() {
            return new InteractionType[]{INTERACTION_TYPE_UNSPECIFIED, DISCUSSION, PRESENTATION, PHONE_CALL, VOICEMAIL, PROFESSIONALLY_PRODUCED, VOICE_SEARCH, VOICE_COMMAND, DICTATION};
        }

        static {
            InteractionType[] $values = $values();
            $VALUES = $values;
            $ENTRIES = b.a($values);
        }

        private InteractionType(String str, int i, String str2) {
            this.description = str2;
        }

        public static a<InteractionType> getEntries() {
            return $ENTRIES;
        }

        public static InteractionType valueOf(String str) {
            return (InteractionType) Enum.valueOf(InteractionType.class, str);
        }

        public static InteractionType[] values() {
            return (InteractionType[]) $VALUES.clone();
        }

        public final String getDescription() {
            return this.description;
        }
    }

    @Keep
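    // Decompiled Kotlin enum; corresponds to RecognitionMetadata.MicrophoneDistance,
    // the approximate distance between speaker and microphone.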
    public static final class MicrophoneDistance {
        private static final a $ENTRIES;
        private static final MicrophoneDistance[] $VALUES;
        private final String description;
        public static final MicrophoneDistance MICROPHONE_DISTANCE_UNSPECIFIED = new MicrophoneDistance("MICROPHONE_DISTANCE_UNSPECIFIED", 0, "Audio type is not known.");
        public static final MicrophoneDistance NEARFIELD = new MicrophoneDistance("NEARFIELD", 1, "The audio was captured from a closely placed microphone. E.g. phone, dictaphone, or handheld microphone. Generally if the speaker is within 1 meter of the microphone.");
        public static final MicrophoneDistance MIDFIELD = new MicrophoneDistance("MIDFIELD", 2, "The speaker is within 3 meters of the microphone.");
        public static final MicrophoneDistance FARFIELD = new MicrophoneDistance("FARFIELD", 3, "The speaker is more than 3 meters away from the microphone.");

        private static final MicrophoneDistance[] $values() {
            return new MicrophoneDistance[]{MICROPHONE_DISTANCE_UNSPECIFIED, NEARFIELD, MIDFIELD, FARFIELD};
        }

        static {
            MicrophoneDistance[] $values = $values();
            $VALUES = $values;
            $ENTRIES = b.a($values);
        }

        private MicrophoneDistance(String str, int i, String str2) {
            this.description = str2;
        }

        public static a<MicrophoneDistance> getEntries() {
            return $ENTRIES;
        }

        public static MicrophoneDistance valueOf(String str) {
            return (MicrophoneDistance) Enum.valueOf(MicrophoneDistance.class, str);
        }

        public static MicrophoneDistance[] values() {
            return (MicrophoneDistance[]) $VALUES.clone();
        }

        public final String getDescription() {
            return this.description;
        }
    }

    @Keep
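    // Decompiled Kotlin enum; corresponds to RecognitionMetadata.OriginalMediaType,
    // i.e. whether the speech was originally an audio or a video recording.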
    public static final class OriginalMediaType {
        private static final a $ENTRIES;
        private static final OriginalMediaType[] $VALUES;
        private final String description;
        public static final OriginalMediaType ORIGINAL_MEDIA_TYPE_UNSPECIFIED = new OriginalMediaType("ORIGINAL_MEDIA_TYPE_UNSPECIFIED", 0, "Unknown original media type.");
        public static final OriginalMediaType AUDIO = new OriginalMediaType("AUDIO", 1, "The speech data is an audio recording.");
        public static final OriginalMediaType VIDEO = new OriginalMediaType("VIDEO", 2, "The speech data was originally recorded on a video.");

        private static final OriginalMediaType[] $values() {
            return new OriginalMediaType[]{ORIGINAL_MEDIA_TYPE_UNSPECIFIED, AUDIO, VIDEO};
        }

        static {
            OriginalMediaType[] $values = $values();
            $VALUES = $values;
            $ENTRIES = b.a($values);
        }

        private OriginalMediaType(String str, int i, String str2) {
            this.description = str2;
        }

        public static a<OriginalMediaType> getEntries() {
            return $ENTRIES;
        }

        public static OriginalMediaType valueOf(String str) {
            return (OriginalMediaType) Enum.valueOf(OriginalMediaType.class, str);
        }

        public static OriginalMediaType[] values() {
            return (OriginalMediaType[]) $VALUES.clone();
        }

        public final String getDescription() {
            return this.description;
        }
    }

    @Keep
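    // Optional hints sent along with a recognize request; corresponds to the v1 API's
    // RecognitionMetadata message. All fields are nullable.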
    public static final class RecognitionMetadata {
        public static final int $stable = 0;
        private final String audioTopic;
        private final Integer industryNaicsCodeOfAudio;
        private final InteractionType interactionType;
        private final MicrophoneDistance microphoneDistance;
        private final String obfuscatedId;
        private final OriginalMediaType originalMediaType;
        private final String originalMimeType;
        private final String recordingDeviceName;
        private final RecordingDeviceType recordingDeviceType;

        public RecognitionMetadata() {
            this(null, null, null, null, null, null, null, null, null, 511, null);
        }

        public final String getAudioTopic() {
            return this.audioTopic;
        }

        public final Integer getIndustryNaicsCodeOfAudio() {
            return this.industryNaicsCodeOfAudio;
        }

        public final InteractionType getInteractionType() {
            return this.interactionType;
        }

        public final MicrophoneDistance getMicrophoneDistance() {
            return this.microphoneDistance;
        }

        public final String getObfuscatedId() {
            return this.obfuscatedId;
        }

        public final OriginalMediaType getOriginalMediaType() {
            return this.originalMediaType;
        }

        public final String getOriginalMimeType() {
            return this.originalMimeType;
        }

        public final String getRecordingDeviceName() {
            return this.recordingDeviceName;
        }

        public final RecordingDeviceType getRecordingDeviceType() {
            return this.recordingDeviceType;
        }

        public RecognitionMetadata(InteractionType interactionType, String str, String str2, String str3, MicrophoneDistance microphoneDistance, Integer num, OriginalMediaType originalMediaType, RecordingDeviceType recordingDeviceType, String str4) {
            this.interactionType = interactionType;
            this.audioTopic = str;
            this.originalMimeType = str2;
            this.recordingDeviceName = str3;
            this.microphoneDistance = microphoneDistance;
            this.industryNaicsCodeOfAudio = num;
            this.originalMediaType = originalMediaType;
            this.recordingDeviceType = recordingDeviceType;
            this.obfuscatedId = str4;
        }

        public RecognitionMetadata(InteractionType interactionType, String str, String str2, String str3, MicrophoneDistance microphoneDistance, Integer num, OriginalMediaType originalMediaType, RecordingDeviceType recordingDeviceType, String str4, int i, h hVar) {
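            // Kotlin default-arguments bridge: each set bit in i selects the null default
            // for the corresponding parameter; jk.h is the (obfuscated) DefaultConstructorMarker.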
            this((i & 1) != 0 ? null : interactionType, (i & 2) != 0 ? null : str, (i & 4) != 0 ? null : str2, (i & 8) != 0 ? null : str3, (i & 16) != 0 ? null : microphoneDistance, (i & 32) != 0 ? null : num, (i & 64) != 0 ? null : originalMediaType, (i & 128) != 0 ? null : recordingDeviceType, (i & 256) == 0 ? str4 : null);
        }
    }

    @Keep
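    // Decompiled Kotlin enum; corresponds to RecognitionMetadata.RecordingDeviceType.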
    public static final class RecordingDeviceType {
        private static final a $ENTRIES;
        private static final RecordingDeviceType[] $VALUES;
        private final String description;
        public static final RecordingDeviceType RECORDING_DEVICE_TYPE_UNSPECIFIED = new RecordingDeviceType("RECORDING_DEVICE_TYPE_UNSPECIFIED", 0, "The recording device is unknown.");
        public static final RecordingDeviceType SMARTPHONE = new RecordingDeviceType("SMARTPHONE", 1, "Speech was recorded on a smartphone.");
        public static final RecordingDeviceType PC = new RecordingDeviceType("PC", 2, "Speech was recorded using a personal computer or tablet.");
        public static final RecordingDeviceType PHONE_LINE = new RecordingDeviceType("PHONE_LINE", 3, "Speech was recorded over a phone line.");
        public static final RecordingDeviceType VEHICLE = new RecordingDeviceType("VEHICLE", 4, "Speech was recorded in a vehicle.");
        public static final RecordingDeviceType OTHER_OUTDOOR_DEVICE = new RecordingDeviceType("OTHER_OUTDOOR_DEVICE", 5, "Speech was recorded outdoors.");
        public static final RecordingDeviceType OTHER_INDOOR_DEVICE = new RecordingDeviceType("OTHER_INDOOR_DEVICE", 6, "Speech was recorded indoors.");

        private static final RecordingDeviceType[] $values() {
            return new RecordingDeviceType[]{RECORDING_DEVICE_TYPE_UNSPECIFIED, SMARTPHONE, PC, PHONE_LINE, VEHICLE, OTHER_OUTDOOR_DEVICE, OTHER_INDOOR_DEVICE};
        }

        static {
            RecordingDeviceType[] $values = $values();
            $VALUES = $values;
            $ENTRIES = b.a($values);
        }

        private RecordingDeviceType(String str, int i, String str2) {
            this.description = str2;
        }

        public static a<RecordingDeviceType> getEntries() {
            return $ENTRIES;
        }

        public static RecordingDeviceType valueOf(String str) {
            return (RecordingDeviceType) Enum.valueOf(RecordingDeviceType.class, str);
        }

        public static RecordingDeviceType[] values() {
            return (RecordingDeviceType[]) $VALUES.clone();
        }

        public final String getDescription() {
            return this.description;
        }
    }

    @Keep
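    // Phrase hints to bias recognition. In the v1 API a SpeechContext carries a list of
    // phrase strings; here each entry is a generic HashMap.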
    public static final class SpeechContext {
        public static final int $stable = 8;
        private final HashMap<String, Object>[] phrases;

        public SpeechContext() {
            this(null, 1, null); // third argument is Kotlin's DefaultConstructorMarker
        }

        public final HashMap<String, Object>[] getPhrases() {
            return this.phrases;
        }

        public SpeechContext(HashMap<String, Object>[] hashMapArr) {
            this.phrases = hashMapArr;
        }

        public SpeechContext(HashMap[] hashMapArr, int i, h hVar) {
            this((i & 1) != 0 ? null : hashMapArr);
        }
    }

    public RequestRecognize$RecognitionConfig() {
        this(null, null, null, null, null, null, null, null, null, null, null, null, null, 8191, null);
    }

    public final Integer getAudioChannelCount() {
        return this.audioChannelCount;
    }

    public final Boolean getEnableAutomaticPunctuation() {
        return this.enableAutomaticPunctuation;
    }

    public final Boolean getEnableSeparateRecognitionPerChannel() {
        return this.enableSeparateRecognitionPerChannel;
    }

    public final Boolean getEnableWordTimeOffsets() {
        return this.enableWordTimeOffsets;
    }

    public final Encoding getEncoding() {
        return this.encoding;
    }

    public final String getLanguageCode() {
        return this.languageCode;
    }

    public final Integer getMaxAlternatives() {
        return this.maxAlternatives;
    }

    public final RecognitionMetadata getMetadata() {
        return this.metadata;
    }

    public final String getModel() {
        return this.model;
    }

    public final Boolean getProfanityFilter() {
        return this.profanityFilter;
    }

    public final Integer getSampleRateHertz() {
        return this.sampleRateHertz;
    }

    public final SpeechContext[] getSpeechContexts() {
        return this.speechContexts;
    }

    public final Boolean getUseEnhanced() {
        return this.useEnhanced;
    }

    public RequestRecognize$RecognitionConfig(Boolean bool, Integer num, Encoding encoding, SpeechContext[] speechContextArr, String str, Integer num2, Boolean bool2, String str2, Boolean bool3, Boolean bool4, RecognitionMetadata recognitionMetadata, Integer num3, Boolean bool5) {
        this.enableAutomaticPunctuation = bool;
        this.maxAlternatives = num;
        this.encoding = encoding;
        this.speechContexts = speechContextArr;
        this.model = str;
        this.audioChannelCount = num2;
        this.enableWordTimeOffsets = bool2;
        this.languageCode = str2;
        this.profanityFilter = bool3;
        this.useEnhanced = bool4;
        this.metadata = recognitionMetadata;
        this.sampleRateHertz = num3;
        this.enableSeparateRecognitionPerChannel = bool5;
    }

    public RequestRecognize$RecognitionConfig(Boolean bool, Integer num, Encoding encoding, SpeechContext[] speechContextArr, String str, Integer num2, Boolean bool2, String str2, Boolean bool3, Boolean bool4, RecognitionMetadata recognitionMetadata, Integer num3, Boolean bool5, int i, h hVar) {
        this((i & 1) != 0 ? null : bool, (i & 2) != 0 ? null : num, (i & 4) != 0 ? null : encoding, (i & 8) != 0 ? null : speechContextArr, (i & 16) != 0 ? null : str, (i & 32) != 0 ? null : num2, (i & 64) != 0 ? null : bool2, (i & 128) != 0 ? null : str2, (i & 256) != 0 ? null : bool3, (i & 512) != 0 ? null : bool4, (i & 1024) != 0 ? null : recognitionMetadata, (i & 2048) != 0 ? null : num3, (i & 4096) == 0 ? bool5 : null);
    }
}
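
For orientation, the sketch below shows how this config class could be populated. It is not code from the app, only an illustration written against the decompiled signatures above: the class name RecognitionConfigExample and the chosen field values (LINEAR16 at 16 kHz, language en-US, model "default") are hypothetical, though they match values documented for the v1 recognize request. The `$` in the class name is a legal Java identifier character. How the app actually builds and serializes the object (presumably to JSON, e.g. with Gson) is not shown in this file.

package com.joaomgcd.taskerm.google.speechtotext;

// Hypothetical caller (not part of Tasker) demonstrating the decompiled API.
public final class RecognitionConfigExample {

    public static RequestRecognize$RecognitionConfig voiceCommandConfig() {
        // Optional metadata hints (v1 RecognitionMetadata).
        RequestRecognize$RecognitionConfig.RecognitionMetadata metadata =
                new RequestRecognize$RecognitionConfig.RecognitionMetadata(
                        RequestRecognize$RecognitionConfig.InteractionType.VOICE_COMMAND,
                        null,          // audioTopic
                        "audio/wav",   // originalMimeType
                        null,          // recordingDeviceName
                        RequestRecognize$RecognitionConfig.MicrophoneDistance.NEARFIELD,
                        null,          // industryNaicsCodeOfAudio
                        RequestRecognize$RecognitionConfig.OriginalMediaType.AUDIO,
                        RequestRecognize$RecognitionConfig.RecordingDeviceType.SMARTPHONE,
                        null);         // obfuscatedId

        // Arguments follow the 13-parameter constructor declared above.
        return new RequestRecognize$RecognitionConfig(
                Boolean.TRUE,   // enableAutomaticPunctuation
                1,              // maxAlternatives
                RequestRecognize$RecognitionConfig.Encoding.LINEAR16,
                null,           // speechContexts
                "default",      // model
                1,              // audioChannelCount
                Boolean.FALSE,  // enableWordTimeOffsets
                "en-US",        // languageCode
                Boolean.FALSE,  // profanityFilter
                Boolean.FALSE,  // useEnhanced
                metadata,
                16000,          // sampleRateHertz
                null);          // enableSeparateRecognitionPerChannel
    }
}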