elevenlabs
Adapted from the Griptape AI Framework documentation.
`__all__ = ['ElevenLabsTextToSpeechDriver']` (module attribute)
ElevenLabsTextToSpeechDriver

Bases: `BaseTextToSpeechDriver`
Source Code in griptape/drivers/text_to_speech/elevenlabs_text_to_speech_driver.py
```python
@define
class ElevenLabsTextToSpeechDriver(BaseTextToSpeechDriver):
    api_key: str = field(kw_only=True, metadata={"serializable": True})
    voice: str = field(kw_only=True, metadata={"serializable": True})
    output_format: str = field(default="mp3_44100_128", kw_only=True, metadata={"serializable": True})
    _client: Optional[ElevenLabs] = field(default=None, kw_only=True, alias="client", metadata={"serializable": False})

    @lazy_property()
    def client(self) -> ElevenLabs:
        return import_optional_dependency("elevenlabs.client").ElevenLabs(api_key=self.api_key)

    def try_text_to_audio(self, prompts: list[str]) -> AudioArtifact:
        audio = self.client.generate(
            text=". ".join(prompts),
            voice=self.voice,
            model=self.model,
            output_format=self.output_format,
        )

        content = b""
        for chunk in audio:
            content += chunk

        # All ElevenLabs audio format strings have the following structure:
        # {format}_{sample_rate}_{bitrate}
        artifact_format = self.output_format.split("_")[0]

        return AudioArtifact(value=content, format=artifact_format)
```
Attributes:

- `_client = field(default=None, kw_only=True, alias='client', metadata={'serializable': False})`
- `api_key = field(kw_only=True, metadata={'serializable': True})`
- `output_format = field(default='mp3_44100_128', kw_only=True, metadata={'serializable': True})`
- `voice = field(kw_only=True, metadata={'serializable': True})`
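A minimal usage sketch follows. It assumes the top-level `griptape.drivers` import path (which may differ between Griptape versions) and an `ELEVEN_API_KEY` environment variable; the `model` and `voice` values are illustrative ElevenLabs identifiers, not defaults of this driver.

```python
import os

# Import path is an assumption; newer Griptape versions may expose the driver elsewhere.
from griptape.drivers import ElevenLabsTextToSpeechDriver

driver = ElevenLabsTextToSpeechDriver(
    api_key=os.environ["ELEVEN_API_KEY"],  # assumed environment variable name
    model="eleven_multilingual_v2",        # example ElevenLabs model id
    voice="Rachel",                        # example ElevenLabs voice
)

# Prompts are joined with ". " and sent as a single generation request.
audio_artifact = driver.try_text_to_audio(["Hello from Griptape", "This is synthesized speech"])

# The artifact format is derived from output_format, so the default yields "mp3".
with open(f"speech.{audio_artifact.format}", "wb") as f:
    f.write(audio_artifact.value)
```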
client()
Source Code in griptape/drivers/text_to_speech/elevenlabs_text_to_speech_driver.py
```python
@lazy_property()
def client(self) -> ElevenLabs:
    return import_optional_dependency("elevenlabs.client").ElevenLabs(api_key=self.api_key)
```
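As an illustration, the `@lazy_property()` decorator defers construction of the ElevenLabs client to the first access of `client`, so the optional `elevenlabs` dependency only needs to be installed when the driver is actually used. The sketch below assumes the driver instance from the usage example above and that the lazy property caches its result on the `_client` field (as the `alias='client'` pairing suggests).

```python
client = driver.client        # first access: imports elevenlabs.client and builds ElevenLabs(api_key=...)
client_again = driver.client  # later accesses are assumed to reuse the value cached on _client
```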
try_text_to_audio(prompts)
Source Code in griptape/drivers/text_to_speech/elevenlabs_text_to_speech_driver.py
```python
def try_text_to_audio(self, prompts: list[str]) -> AudioArtifact:
    audio = self.client.generate(
        text=". ".join(prompts),
        voice=self.voice,
        model=self.model,
        output_format=self.output_format,
    )

    content = b""
    for chunk in audio:
        content += chunk

    # All ElevenLabs audio format strings have the following structure:
    # {format}_{sample_rate}_{bitrate}
    artifact_format = self.output_format.split("_")[0]

    return AudioArtifact(value=content, format=artifact_format)
```
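The comment in the source describes the `output_format` naming convention; a minimal sketch of the parsing it implies, using the driver's default value:

```python
# {format}_{sample_rate}_{bitrate} -> keep only the container prefix
output_format = "mp3_44100_128"                # the driver's default
artifact_format = output_format.split("_")[0]
assert artifact_format == "mp3"                # stored as the AudioArtifact format
```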