Spark 3.5.5 ScalaDoc - org.apache.spark.SparkContext

class SparkContext extends Logging

Inherited

  1. SparkContext

  2. Logging

  3. AnyRef

  4. Any


Instance Constructors

  1. new SparkContext(master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map())
  2. new SparkContext(master: String, appName: String, conf: SparkConf)
  3. new SparkContext()
  4. new SparkContext(config: SparkConf)
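
A minimal usage sketch for the SparkConf-based constructor listed above. The application name and the local[*] master URL are illustrative assumptions, not values required by the API; any valid master URL and app name work.

  import org.apache.spark.{SparkConf, SparkContext}

  // Hypothetical configuration values, for illustration only.
  val conf = new SparkConf()
    .setAppName("example-app")   // application name shown in the UI and logs
    .setMaster("local[*]")       // run locally, one worker thread per core

  // Only one SparkContext should be active per JVM.
  val sc = new SparkContext(conf)

  try {
    // ... submit jobs through sc ...
  } finally {
    sc.stop()   // release resources when finished
  }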

Value Members

  1. final def !=(arg0: Any): Boolean
  2. final def ##(): Int
  3. final def ==(arg0: Any): Boolean
  4. def addArchive(path: String): Unit
  5. def addFile(path: String, recursive: Boolean): Unit
  6. def addFile(path: String): Unit
  7. def addJar(path: String): Unit
  8. def addJobTag(tag: String): Unit
  9. def addSparkListener(listener: SparkListenerInterface): Unit
  10. def appName: String
  11. def applicationAttemptId: Option[String]
  12. def applicationId: String
  13. def archives: Seq[String]
  14. final def asInstanceOf[T0]: T0
  15. def binaryFiles(path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)]
  16. def binaryRecords(path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]]
  17. def broadcast[T](value: T)(implicit arg0: ClassTag[T]): Broadcast[T]
  18. def cancelAllJobs(): Unit
  19. def cancelJob(jobId: Int): Unit
  20. def cancelJob(jobId: Int, reason: String): Unit
  21. def cancelJobGroup(groupId: String): Unit
  22. def cancelJobsWithTag(tag: String): Unit
  23. def cancelStage(stageId: Int): Unit
  24. def cancelStage(stageId: Int, reason: String): Unit
  25. def checkpointFile[T](path: String)(implicit arg0: ClassTag[T]): RDD[T]
  26. def clearCallSite(): Unit
  27. def clearJobGroup(): Unit
  28. def clearJobTags(): Unit
  29. def clone(): AnyRef
  30. def collectionAccumulator[T](name: String): CollectionAccumulator[T]
  31. def collectionAccumulator[T]: CollectionAccumulator[T]
  32. def defaultMinPartitions: Int
  33. def defaultParallelism: Int
  34. def deployMode: String
  35. def doubleAccumulator(name: String): DoubleAccumulator
  36. def doubleAccumulator: DoubleAccumulator
  37. def emptyRDD[T](implicit arg0: ClassTag[T]): RDD[T]
  38. final def eq(arg0: AnyRef): Boolean
  39. def equals(arg0: Any): Boolean
  40. def files: Seq[String]
  41. def finalize(): Unit
  42. def getAllPools: Seq[Schedulable]
  43. def getCheckpointDir: Option[String]
  44. final def getClass(): Class[_]
  45. def getConf: SparkConf
  46. def getExecutorMemoryStatus: Map[String, (Long, Long)]
  47. def getJobTags(): Set[String]
  48. def getLocalProperty(key: String): String
  49. def getPersistentRDDs: Map[Int, RDD[_]]
  50. def getPoolForName(pool: String): Option[Schedulable]
  51. def getRDDStorageInfo: Array[RDDInfo]
  52. def getSchedulingMode: SchedulingMode
  53. def hadoopConfiguration: Configuration
  54. def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)]
  55. def hadoopFile[K, V, F <: InputFormat[K, V]](path: String, minPartitions: Int)(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)]
  56. def hadoopFile[K, V](path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)]
  57. def hadoopRDD[K, V](conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)]
  58. def hashCode(): Int
  59. def initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
  60. def initializeLogIfNecessary(isInterpreter: Boolean): Unit
  61. final def isInstanceOf[T0]: Boolean
  62. def isLocal: Boolean
  63. def isStopped: Boolean
  64. def isTraceEnabled(): Boolean
  65. def jars: Seq[String]
  66. def killExecutor(executorId: String): Boolean
  67. def killExecutors(executorIds: Seq[String]): Boolean
  68. def killTaskAttempt(taskId: Long, interruptThread: Boolean = true, reason: String = ...): Boolean
  69. def listArchives(): Seq[String]
  70. def listFiles(): Seq[String]
  71. def listJars(): Seq[String]
  72. val localProperties: InheritableThreadLocal[Properties]
  73. def log: Logger
  74. def logDebug(msg: ⇒ String, throwable: Throwable): Unit
  75. def logDebug(msg: ⇒ String): Unit
  76. def logError(msg: ⇒ String, throwable: Throwable): Unit
  77. def logError(msg: ⇒ String): Unit
  78. def logInfo(msg: ⇒ String, throwable: Throwable): Unit
  79. def logInfo(msg: ⇒ String): Unit
  80. def logName: String
  81. def logTrace(msg: ⇒ String, throwable: Throwable): Unit
  82. def logTrace(msg: ⇒ String): Unit
  83. def logWarning(msg: ⇒ String, throwable: Throwable): Unit
  84. def logWarning(msg: ⇒ String): Unit
  85. def longAccumulator(name: String): LongAccumulator
  86. def longAccumulator: LongAccumulator
  87. def makeRDD[T](seq: Seq[(T, Seq[String])])(implicit arg0: ClassTag[T]): RDD[T]
  88. def makeRDD[T](seq: Seq[T], numSlices: Int = defaultParallelism)(implicit arg0: ClassTag[T]): RDD[T]
  89. def master: String
  90. final def ne(arg0: AnyRef): Boolean
  91. def newAPIHadoopFile[K, V, F <: InputFormat[K, V]](path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)]
  92. def newAPIHadoopFile[K, V, F <: InputFormat[K, V]](path: String)(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)]
  93. def newAPIHadoopRDD[K, V, F <: InputFormat[K, V]](conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)]
  94. final def notify(): Unit
  95. final def notifyAll(): Unit
  96. def objectFile[T](path: String, minPartitions: Int = defaultMinPartitions)(implicit arg0: ClassTag[T]): RDD[T]
  97. def parallelize[T](seq: Seq[T], numSlices: Int = defaultParallelism)(implicit arg0: ClassTag[T]): RDD[T]
  98. def range(start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long]
  99. def register(acc: AccumulatorV2[_, _], name: String): Unit
  100. def register(acc: AccumulatorV2[_, _]): Unit
  101. def removeJobTag(tag: String): Unit
  102. def removeSparkListener(listener: SparkListenerInterface): Unit
  103. def requestExecutors(numAdditionalExecutors: Int): Boolean
  104. def requestTotalExecutors(numExecutors: Int, localityAwareTasks: Int, hostToLocalTaskCount: Map[String, Int]): Boolean
  105. def resources: Map[String, ResourceInformation]
  106. def runApproximateJob[T, U, R](rdd: RDD[T], func: (TaskContext, Iterator[T]) ⇒ U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R]
  107. def runJob[T, U](rdd: RDD[T], processPartition: (Iterator[T]) ⇒ U, resultHandler: (Int, U) ⇒ Unit)(implicit arg0: ClassTag[U]): Unit
  108. def runJob[T, U](rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) ⇒ U, resultHandler: (Int, U) ⇒ Unit)(implicit arg0: ClassTag[U]): Unit
  109. def runJob[T, U](rdd: RDD[T], func: (Iterator[T]) ⇒ U)(implicit arg0: ClassTag[U]): Array[U]
  110. def runJob[T, U](rdd: RDD[T], func: (TaskContext, Iterator[T]) ⇒ U)(implicit arg0: ClassTag[U]): Array[U]
  111. def runJob[T, U](rdd: RDD[T], func: (Iterator[T]) ⇒ U, partitions: Seq[Int])(implicit arg0: ClassTag[U]): Array[U]
  112. def runJob[T, U](rdd: RDD[T], func: (TaskContext, Iterator[T]) ⇒ U, partitions: Seq[Int])(implicit arg0: ClassTag[U]): Array[U]
  113. def runJob[T, U](rdd: RDD[T], func: (TaskContext, Iterator[T]) ⇒ U, partitions: Seq[Int], resultHandler: (Int, U) ⇒ Unit)(implicit arg0: ClassTag[U]): Unit
  114. def sequenceFile[K, V](path: String, minPartitions: Int = defaultMinPartitions)(implicit km: ClassTag[K], vm: ClassTag[V], kcf: () ⇒ WritableConverter[K], vcf: () ⇒ WritableConverter[V]): RDD[(K, V)]
  115. def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)]
  116. def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int): RDD[(K, V)]
  117. def setCallSite(shortCallSite: String): Unit
  118. def setCheckpointDir(directory: String): Unit
  119. def setInterruptOnCancel(interruptOnCancel: Boolean): Unit
  120. def setJobDescription(value: String): Unit
  121. def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit
  122. def setLocalProperty(key: String, value: String): Unit
  123. def setLogLevel(logLevel: String): Unit
  124. val sparkUser: String
  125. val startTime: Long
  126. def statusTracker: SparkStatusTracker
  127. def stop(exitCode: Int): Unit
  128. def stop(): Unit
  129. def submitJob[T, U, R](rdd: RDD[T], processPartition: (Iterator[T]) ⇒ U, partitions: Seq[Int], resultHandler: (Int, U) ⇒ Unit, resultFunc: ⇒ R): SimpleFutureAction[R]
  130. final def synchronized[T0](arg0: ⇒ T0): T0
  131. def textFile(path: String, minPartitions: Int = defaultMinPartitions): RDD[String]
  132. def toString(): String
  133. def uiWebUrl: Option[String]
  134. def union[T](first: RDD[T], rest: RDD[T]*)(implicit arg0: ClassTag[T]): RDD[T]
  135. def union[T](rdds: Seq[RDD[T]])(implicit arg0: ClassTag[T]): RDD[T]
  136. def version: String
  137. final def wait(): Unit
  138. final def wait(arg0: Long, arg1: Int): Unit
  139. final def wait(arg0: Long): Unit
  140. def wholeTextFiles(path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, String)]
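
A brief sketch exercising a few of the members listed above (parallelize, broadcast, longAccumulator, textFile, range, union). It assumes an already-constructed SparkContext named sc (see the constructor sketch earlier); the file path and sample data are illustrative assumptions only.

  import org.apache.spark.SparkContext
  import org.apache.spark.rdd.RDD

  def demo(sc: SparkContext): Unit = {
    // parallelize: distribute a local collection across numSlices partitions
    val nums: RDD[Int] = sc.parallelize(1 to 100, numSlices = 4)

    // broadcast: ship a read-only value to executors once and reuse it in tasks
    val factor = sc.broadcast(10)

    // longAccumulator: aggregate per-task counts back to the driver
    val processed = sc.longAccumulator("processed")

    val scaled = nums.map { n =>
      processed.add(1)
      n * factor.value
    }

    // sum() is an action; accumulator values are only reliable after an action runs
    println(s"sum = ${scaled.sum()}, processed = ${processed.value}")

    // textFile: one record per line; the path here is hypothetical
    val lines = sc.textFile("/tmp/example.txt", minPartitions = 2)
    println(s"lines = ${lines.count()}")

    // range produces RDD[Long]; union combines RDDs of the same element type
    val longs = sc.union(sc.range(0, 10), sc.range(100, 110))
    println(s"union count = ${longs.count()}")
  }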

Inherited from Logging

Inherited from AnyRef

Inherited from Any
