Spark 3.5.5 ScalaDoc - org.apache.spark.sql.streaming.DataStreamWriter (original) (raw)
final class DataStreamWriter[T] extends AnyRef
Ordering
- Alphabetic
- By Inheritance
Inherited
DataStreamWriter
AnyRef
Any
Hide All
Show All
Value Members
- final def !=(arg0: Any): Boolean
- final def ##(): Int
- final def ==(arg0: Any): Boolean
- final def asInstanceOf[T0]: T0
- def clone(): AnyRef
- final def eq(arg0: AnyRef): Boolean
- def equals(arg0: Any): Boolean
- def finalize(): Unit
- def foreach(writer: ForeachWriter[T]): DataStreamWriter[T]
- def foreachBatch(function: VoidFunction2[Dataset[T], Long]): DataStreamWriter[T]
- def foreachBatch(function: (Dataset[T], Long) ⇒ Unit): DataStreamWriter[T]
- def format(source: String): DataStreamWriter[T]
- final def getClass(): Class[_]
- def hashCode(): Int
- final def isInstanceOf[T0]: Boolean
- final def ne(arg0: AnyRef): Boolean
- final def notify(): Unit
- final def notifyAll(): Unit
- def option(key: String, value: Double): DataStreamWriter[T]
- def option(key: String, value: Long): DataStreamWriter[T]
- def option(key: String, value: Boolean): DataStreamWriter[T]
- def option(key: String, value: String): DataStreamWriter[T]
- def options(options: java.util.Map[String, String]): DataStreamWriter[T]
- def options(options: scala.collection.Map[String, String]): DataStreamWriter[T]
- def outputMode(outputMode: String): DataStreamWriter[T]
- def outputMode(outputMode: OutputMode): DataStreamWriter[T]
- def partitionBy(colNames: String*): DataStreamWriter[T]
- def queryName(queryName: String): DataStreamWriter[T]
- def start(): StreamingQuery
- def start(path: String): StreamingQuery
- final def synchronized[T0](arg0: ⇒ T0): T0
- def toString(): String
- def toTable(tableName: String): StreamingQuery
- def trigger(trigger: Trigger): DataStreamWriter[T]
- final def wait(): Unit
- final def wait(arg0: Long, arg1: Int): Unit
- final def wait(arg0: Long): Unit