Spark 3.5.5 ScalaDoc - org.apache.spark.SparkContext
class SparkContext extends Logging
Linear Supertypes: Logging, AnyRef, Any
Instance Constructors
- new SparkContext(master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map())
- new SparkContext(master: String, appName: String, conf: SparkConf)
- new SparkContext()
- new SparkContext(config: SparkConf)
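The SparkConf-based constructor is the usual entry point. A minimal sketch of that constructor, assuming a local run; the app name and the master URL "local[*]" are illustrative values, not part of this listing:

```scala
import org.apache.spark.{SparkConf, SparkContext}

// Build a SparkConf and hand it to the SparkConf-based constructor above.
val conf = new SparkConf()
  .setAppName("example-app")   // illustrative app name
  .setMaster("local[*]")       // illustrative master URL for a local run

val sc = new SparkContext(conf)

// ... define and run RDD jobs here ...

sc.stop()
```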
Value Members
- final def !=(arg0: Any): Boolean
- final def ##(): Int
- final def ==(arg0: Any): Boolean
- def addArchive(path: String): Unit
- def addFile(path: String, recursive: Boolean): Unit
- def addFile(path: String): Unit
- def addJar(path: String): Unit
- def addJobTag(tag: String): Unit
- def addSparkListener(listener: SparkListenerInterface): Unit
- def appName: String
- def applicationAttemptId: Option[String]
- def applicationId: String
- def archives: Seq[String]
- final def asInstanceOf[T0]: T0
- def binaryFiles(path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)]
- def binaryRecords(path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]]
- def broadcast[T](value: T)(implicit arg0: ClassTag[T]): Broadcast[T]
- def cancelAllJobs(): Unit
- def cancelJob(jobId: Int): Unit
- def cancelJob(jobId: Int, reason: String): Unit
- def cancelJobGroup(groupId: String): Unit
- def cancelJobsWithTag(tag: String): Unit
- def cancelStage(stageId: Int): Unit
- def cancelStage(stageId: Int, reason: String): Unit
- def checkpointFile[T](path: String)(implicit arg0: ClassTag[T]): RDD[T]
- def clearCallSite(): Unit
- def clearJobGroup(): Unit
- def clearJobTags(): Unit
- def clone(): AnyRef
- def collectionAccumulator[T](name: String): CollectionAccumulator[T]
- def collectionAccumulator[T]: CollectionAccumulator[T]
- def defaultMinPartitions: Int
- def defaultParallelism: Int
- def deployMode: String
- def doubleAccumulator(name: String): DoubleAccumulator
- def doubleAccumulator: DoubleAccumulator
- def emptyRDD[T](implicit arg0: ClassTag[T]): RDD[T]
- final def eq(arg0: AnyRef): Boolean
- def equals(arg0: Any): Boolean
- def files: Seq[String]
- def finalize(): Unit
- def getAllPools: Seq[Schedulable]
- def getCheckpointDir: Option[String]
- final def getClass(): Class[_]
- def getConf: SparkConf
- def getExecutorMemoryStatus: Map[String, (Long, Long)]
- def getJobTags(): Set[String]
- def getLocalProperty(key: String): String
- def getPersistentRDDs: Map[Int, RDD[_]]
- def getPoolForName(pool: String): Option[Schedulable]
- def getRDDStorageInfo: Array[RDDInfo]
- def getSchedulingMode: SchedulingMode
- def hadoopConfiguration: Configuration
- def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)]
- def hadoopFile[K, V, F <: InputFormat[K, V]](path: String, minPartitions: Int)(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)]
- def hadoopFile[K, V](path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)]
- def hadoopRDD[K, V](conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)]
- def hashCode(): Int
- def initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
- def initializeLogIfNecessary(isInterpreter: Boolean): Unit
- final def isInstanceOf[T0]: Boolean
- def isLocal: Boolean
- def isStopped: Boolean
- def isTraceEnabled(): Boolean
- def jars: Seq[String]
- def killExecutor(executorId: String): Boolean
- def killExecutors(executorIds: Seq[String]): Boolean
- def killTaskAttempt(taskId: Long, interruptThread: Boolean = true, reason: String = ...): Boolean
- def listArchives(): Seq[String]
- def listFiles(): Seq[String]
- def listJars(): Seq[String]
- val localProperties: InheritableThreadLocal[Properties]
- def log: Logger
- def logDebug(msg: ⇒ String, throwable: Throwable): Unit
- def logDebug(msg: ⇒ String): Unit
- def logError(msg: ⇒ String, throwable: Throwable): Unit
- def logError(msg: ⇒ String): Unit
- def logInfo(msg: ⇒ String, throwable: Throwable): Unit
- def logInfo(msg: ⇒ String): Unit
- def logName: String
- def logTrace(msg: ⇒ String, throwable: Throwable): Unit
- def logTrace(msg: ⇒ String): Unit
- def logWarning(msg: ⇒ String, throwable: Throwable): Unit
- def logWarning(msg: ⇒ String): Unit
- def longAccumulator(name: String): LongAccumulator
- def longAccumulator: LongAccumulator
- def makeRDD[T](seq: Seq[(T, Seq[String])])(implicit arg0: ClassTag[T]): RDD[T]
- def makeRDD[T](seq: Seq[T], numSlices: Int = defaultParallelism)(implicit arg0: ClassTag[T]): RDD[T]
- def master: String
- final def ne(arg0: AnyRef): Boolean
- def newAPIHadoopFile[K, V, F <: InputFormat[K, V]](path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)]
- def newAPIHadoopFile[K, V, F <: InputFormat[K, V]](path: String)(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)]
- def newAPIHadoopRDD[K, V, F <: InputFormat[K, V]](conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)]
- final def notify(): Unit
- final def notifyAll(): Unit
- def objectFile[T](path: String, minPartitions: Int = defaultMinPartitions)(implicit arg0: ClassTag[T]): RDD[T]
- def parallelize[T](seq: Seq[T], numSlices: Int = defaultParallelism)(implicit arg0: ClassTag[T]): RDD[T]
- def range(start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long]
- def register(acc: AccumulatorV2[_, _], name: String): Unit
- def register(acc: AccumulatorV2[_, _]): Unit
- def removeJobTag(tag: String): Unit
- def removeSparkListener(listener: SparkListenerInterface): Unit
- def requestExecutors(numAdditionalExecutors: Int): Boolean
- def requestTotalExecutors(numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: Map[String, Int]): Boolean
- def resources: Map[String, ResourceInformation]
- def runApproximateJob[T, U, R](rdd: RDD[T], func: (TaskContext, Iterator[T]) ⇒ U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R]
- def runJob[T, U](rdd: RDD[T], processPartition: (Iterator[T]) ⇒ U, resultHandler: (Int, U) ⇒ Unit)(implicit arg0: ClassTag[U]): Unit
- def runJob[T, U](rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) ⇒ U, resultHandler: (Int, U) ⇒ Unit)(implicit arg0: ClassTag[U]): Unit
- def runJob[T, U](rdd: RDD[T], func: (Iterator[T]) ⇒ U)(implicit arg0: ClassTag[U]): Array[U]
- def runJob[T, U](rdd: RDD[T], func: (TaskContext, Iterator[T]) ⇒ U)(implicit arg0: ClassTag[U]): Array[U]
- def runJob[T, U](rdd: RDD[T], func: (Iterator[T]) ⇒ U, partitions: Seq[Int])(implicit arg0: ClassTag[U]): Array[U]
- def runJob[T, U](rdd: RDD[T], func: (TaskContext, Iterator[T]) ⇒ U, partitions: Seq[Int])(implicit arg0: ClassTag[U]): Array[U]
- def runJob[T, U](rdd: RDD[T], func: (TaskContext, Iterator[T]) ⇒ U, partitions: Seq[Int], resultHandler: (Int, U) ⇒ Unit)(implicit arg0: ClassTag[U]): Unit
- def sequenceFile[K, V](path: String, minPartitions: Int = defaultMinPartitions)(implicit km: ClassTag[K], vm: ClassTag[V], kcf: () ⇒ WritableConverter[K], vcf: () ⇒ WritableConverter[V]): RDD[(K, V)]
- def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)]
- def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int): RDD[(K, V)]
- def setCallSite(shortCallSite: String): Unit
- def setCheckpointDir(directory: String): Unit
- def setInterruptOnCancel(interruptOnCancel: Boolean): Unit
- def setJobDescription(value: String): Unit
- def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit
- def setLocalProperty(key: String, value: String): Unit
- def setLogLevel(logLevel: String): Unit
- val sparkUser: String
- val startTime: Long
- def statusTracker: SparkStatusTracker
- def stop(exitCode: Int): Unit
- def stop(): Unit
- def submitJob[T, U, R](rdd: RDD[T], processPartition: (Iterator[T]) ⇒ U, partitions: Seq[Int], resultHandler: (Int, U) ⇒ Unit, resultFunc: ⇒ R): SimpleFutureAction[R]
- final def synchronized[T0](arg0: ⇒ T0): T0
- def textFile(path: String, minPartitions: Int = defaultMinPartitions): RDD[String]
- def toString(): String
- def uiWebUrl: Option[String]
- def union[T](first: RDD[T], rest: RDD[T]*)(implicit arg0: ClassTag[T]): RDD[T]
- def union[T](rdds: Seq[RDD[T]])(implicit arg0: ClassTag[T]): RDD[T]
- def version: String
- final def wait(): Unit
- final def wait(arg0: Long, arg1: Int): Unit
- final def wait(arg0: Long): Unit
- def wholeTextFiles(path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)]
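A minimal sketch tying a few of the members listed above together (parallelize, range, broadcast, longAccumulator, runJob, stop), assuming a local master; the object name, master URL, and sample data are illustrative, not part of the listing:

```scala
import org.apache.spark.{SparkConf, SparkContext}

object SparkContextMembersSketch {
  def main(args: Array[String]): Unit = {
    // Illustrative app name and local master URL.
    val sc = new SparkContext(
      new SparkConf().setAppName("members-sketch").setMaster("local[*]"))

    // parallelize / range: create RDDs from driver-side data.
    val nums  = sc.parallelize(Seq(1, 2, 3, 4, 5))
    val longs = sc.range(0L, 100L, step = 10L)

    // broadcast: ship a read-only value to executors once.
    val lookup = sc.broadcast(Map(1 -> "one", 2 -> "two"))
    val named  = nums.map(n => lookup.value.getOrElse(n, "other"))

    // longAccumulator: aggregate counts from tasks back to the driver.
    val evens = sc.longAccumulator("evens")
    nums.foreach(n => if (n % 2 == 0) evens.add(1L))

    // runJob: apply a function to every partition and collect the results.
    val partitionSizes = sc.runJob(nums, (it: Iterator[Int]) => it.size)

    println(named.collect().mkString(", "))
    println(s"range count = ${longs.count()}, even count = ${evens.value}, " +
      s"partition sizes = ${partitionSizes.mkString(", ")}")

    sc.stop()
  }
}
```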