Only in .: .DS_Store
diff ./BarrierTaskContext.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/BarrierTaskContext.scala
21a22
> import scala.collection.JavaConverters._
213a215,218
>   override def resourcesJMap(): java.util.Map[String, ResourceInformation] = {
>     resources().asJava
>   }
>
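
The BarrierTaskContext hunk above adds a Java-friendly view of the task's resources on top of the existing Scala API. A minimal usage sketch, assuming a task already running on an executor (the "gpu" resource name is a made-up example, not part of the diff):

  import org.apache.spark.TaskContext
  import org.apache.spark.resource.ResourceInformation

  // resourcesJMap() mirrors resources() but returns a java.util.Map,
  // so Java callers don't need Scala collections.
  val ctx: TaskContext = TaskContext.get()
  val jResources: java.util.Map[String, ResourceInformation] = ctx.resourcesJMap()
  val gpu = jResources.get("gpu") // hypothetical resource name
  if (gpu != null) {
    println(s"GPU addresses for this task: ${gpu.addresses.mkString(",")}")
  }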
diff ./ContextCleaner.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/ContextCleaner.scala
29a30
> import org.apache.spark.shuffle.api.ShuffleDriverComponents
61c62,64
< private[spark] class ContextCleaner(sc: SparkContext) extends Logging {
---
> private[spark] class ContextCleaner(
>     sc: SparkContext,
>     shuffleDriverComponents: ShuffleDriverComponents) extends Logging {
224c227
<       blockManagerMaster.removeShuffle(shuffleId, blocking)
---
>       shuffleDriverComponents.removeShuffle(shuffleId, blocking)
272d274
<   private def blockManagerMaster = sc.env.blockManager.master
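
The ContextCleaner hunks swap the hard-wired BlockManagerMaster call for the pluggable shuffle driver API, so an alternative shuffle storage backend can own cleanup. A rough sketch of the dependency shape, using simplified stand-in types rather than the real org.apache.spark.shuffle.api interface:

  // Stand-in for the two calls the diff relies on; the real
  // ShuffleDriverComponents is a Java interface with more methods.
  trait DriverComponentsSketch {
    def registerShuffle(shuffleId: Int): Unit
    def removeShuffle(shuffleId: Int, blocking: Boolean): Unit
  }

  // The cleaner no longer reaches into sc.env.blockManager.master;
  // whichever backend was loaded decides how shuffle data is removed.
  class CleanerSketch(components: DriverComponentsSketch) {
    def doCleanupShuffle(shuffleId: Int, blocking: Boolean): Unit =
      components.removeShuffle(shuffleId, blocking)
  }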
diff ./Dependency.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/Dependency.scala
98a99
>   _rdd.sparkContext.shuffleDriverComponents.registerShuffle(shuffleId)
diff ./ExecutorAllocationManager.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
291c291
<     updateAndSyncNumExecutorsTarget(clock.getTimeMillis())
---
>     updateAndSyncNumExecutorsTarget(clock.nanoTime())
339c339
<       addTime = now + (sustainedSchedulerBacklogTimeoutS * 1000)
---
>       addTime = now + TimeUnit.SECONDS.toNanos(sustainedSchedulerBacklogTimeoutS)
484c484
<       addTime = clock.getTimeMillis + schedulerBacklogTimeoutS * 1000
---
>       addTime = clock.nanoTime() + TimeUnit.SECONDS.toNanos(schedulerBacklogTimeoutS)
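
All three ExecutorAllocationManager hunks move the backlog timers from wall-clock milliseconds to a monotonic nanosecond clock: deadlines become now + TimeUnit.SECONDS.toNanos(timeout) rather than now + timeout * 1000. A small illustration of the difference, using plain JDK calls instead of Spark's Clock trait:

  import java.util.concurrent.TimeUnit

  val timeoutS = 5L
  // Old scheme: wall-clock millis, which can jump backwards or forwards
  // if the system clock is adjusted (NTP, manual changes).
  val deadlineMs = System.currentTimeMillis() + timeoutS * 1000
  // New scheme: monotonic nanos, unaffected by clock adjustments.
  val deadlineNs = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutS)
  // The two values are on different scales and must never be mixed.
  println(deadlineNs - System.nanoTime()) // remaining time in nanoseconds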
diff ./MapOutputTracker.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/MapOutputTracker.scala
20c20
< import java.io._
---
> import java.io.{ByteArrayInputStream, ObjectInputStream, ObjectOutputStream}
23d22
< import java.util.zip.{GZIPInputStream, GZIPOutputStream}
31a31,32
> import org.apache.commons.io.output.{ByteArrayOutputStream => ApacheByteArrayOutputStream}
>
34a36
> import org.apache.spark.io.CompressionCodec
36c38
< import org.apache.spark.scheduler.MapStatus
---
> import org.apache.spark.scheduler.{ExecutorCacheTaskLocation, MapStatus}
195c197,198
<       minBroadcastSize: Int): Array[Byte] = {
---
>       minBroadcastSize: Int,
>       conf: SparkConf): Array[Byte] = {
207c210
<         mapStatuses, broadcastManager, isLocal, minBroadcastSize)
---
>         mapStatuses, broadcastManager, isLocal, minBroadcastSize, conf)
322c325
<     getMapSizesByExecutorId(shuffleId, reduceId, reduceId + 1, useOldFetchProtocol = false)
---
>     getMapSizesByExecutorId(shuffleId, reduceId, reduceId + 1)
337,338c340
<       endPartition: Int,
<       useOldFetchProtocol: Boolean)
---
>       endPartition: Int)
341a344,358
>    * Called from executors to get the server URIs and output sizes for each shuffle block that
>    * needs to be read from a given range of map output partitions (startPartition is included but
>    * endPartition is excluded from the range) and is produced by a specific mapper.
>    *
>    * @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId,
>    *         and the second item is a sequence of (shuffle block id, shuffle block size, map index)
>    *         tuples describing the shuffle blocks that are stored at that block manager.
>    */
>   def getMapSizesByMapIndex(
>       shuffleId: Int,
>       mapIndex: Int,
>       startPartition: Int,
>       endPartition: Int): Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])]
>
>   /**
360,361c377,378
<     broadcastManager: BroadcastManager,
<     isLocal: Boolean)
---
>     private[spark] val broadcastManager: BroadcastManager,
>     private[spark] val isLocal: Boolean)
436c453,454
<           shuffleStatus.serializedMapStatus(broadcastManager, isLocal, minSizeForBroadcast))
---
>           shuffleStatus.serializedMapStatus(broadcastManager, isLocal, minSizeForBroadcast,
>             conf))
671a690,714
>   /**
>    * Return the location where the mapper ran. Each location includes both a host and an
>    * executor id on that host.
>    *
>    * @param dep shuffle dependency object
>    * @param mapId the map id
>    * @return a sequence of locations where the task runs.
>    */
>   def getMapLocation(dep: ShuffleDependency[_, _, _], mapId: Int): Seq[String] =
>   {
>     val shuffleStatus = shuffleStatuses.get(dep.shuffleId).orNull
>     if (shuffleStatus != null) {
>       shuffleStatus.withMapStatuses { statuses =>
>         if (mapId >= 0 && mapId < statuses.length) {
>           Seq(ExecutorCacheTaskLocation(statuses(mapId).location.host,
>             statuses(mapId).location.executorId).toString)
>         } else {
>           Nil
>         }
>       }
>     } else {
>       Nil
>     }
>   }
>
691,692c734
<       endPartition: Int,
<       useOldFetchProtocol: Boolean)
---
>       endPartition: Int)
699c741,763
<             shuffleId, startPartition, endPartition, statuses, useOldFetchProtocol)
---
>             shuffleId, startPartition, endPartition, statuses)
>         }
>       case None =>
>         Iterator.empty
>     }
>   }
>
>   override def getMapSizesByMapIndex(
>       shuffleId: Int,
>       mapIndex: Int,
>       startPartition: Int,
>       endPartition: Int): Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])] = {
>     logDebug(s"Fetching outputs for shuffle $shuffleId, mapIndex $mapIndex, " +
>       s"partitions $startPartition-$endPartition")
>     shuffleStatuses.get(shuffleId) match {
>       case Some(shuffleStatus) =>
>         shuffleStatus.withMapStatuses { statuses =>
>           MapOutputTracker.convertMapStatuses(
>             shuffleId,
>             startPartition,
>             endPartition,
>             statuses,
>             Some(mapIndex))
736,737c800
<       endPartition: Int,
<       useOldFetchProtocol: Boolean)
---
>       endPartition: Int)
740c803
<     val statuses = getStatuses(shuffleId)
---
>     val statuses = getStatuses(shuffleId, conf)
743c806,825
<         shuffleId, startPartition, endPartition, statuses, useOldFetchProtocol)
---
>         shuffleId, startPartition, endPartition, statuses)
>     } catch {
>       case e: MetadataFetchFailedException =>
>         // We experienced a fetch failure so our mapStatuses cache is outdated; clear it:
>         mapStatuses.clear()
>         throw e
>     }
>   }
>
>   override def getMapSizesByMapIndex(
>       shuffleId: Int,
>       mapIndex: Int,
>       startPartition: Int,
>       endPartition: Int): Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])] = {
>     logDebug(s"Fetching outputs for shuffle $shuffleId, mapIndex $mapIndex, " +
>       s"partitions $startPartition-$endPartition")
>     val statuses = getStatuses(shuffleId, conf)
>     try {
>       MapOutputTracker.convertMapStatuses(shuffleId, startPartition, endPartition,
>         statuses, Some(mapIndex))
758c840
<   private def getStatuses(shuffleId: Int): Array[MapStatus] = {
---
>   private def getStatuses(shuffleId: Int, conf: SparkConf): Array[MapStatus] = {
768c850
<         fetchedStatuses = MapOutputTracker.deserializeMapStatuses(fetchedBytes)
---
>         fetchedStatuses = MapOutputTracker.deserializeMapStatuses(fetchedBytes, conf)
810c892
<   // it to reduce tasks. We do this by compressing the serialized bytes using GZIP. They will
---
>   // it to reduce tasks. We do this by compressing the serialized bytes using Zstd. They will
812,814c894,904
<   def serializeMapStatuses(statuses: Array[MapStatus], broadcastManager: BroadcastManager,
<       isLocal: Boolean, minBroadcastSize: Int): (Array[Byte], Broadcast[Array[Byte]]) = {
<     val out = new ByteArrayOutputStream
---
>   def serializeMapStatuses(
>       statuses: Array[MapStatus],
>       broadcastManager: BroadcastManager,
>       isLocal: Boolean,
>       minBroadcastSize: Int,
>       conf: SparkConf): (Array[Byte], Broadcast[Array[Byte]]) = {
>     // Using `org.apache.commons.io.output.ByteArrayOutputStream` instead of the standard one.
>     // This implementation doesn't reallocate the whole memory block but allocates
>     // additional buffers. This way no buffers need to be garbage collected and
>     // the contents don't have to be copied to the new buffer.
>     val out = new ApacheByteArrayOutputStream()
816c906,907
<     val objOut = new ObjectOutputStream(new GZIPOutputStream(out))
---
>     val codec = CompressionCodec.createCodec(conf, "zstd")
>     val objOut = new ObjectOutputStream(codec.compressedOutputStream(out))
833,835c924,929
<       val oos = new ObjectOutputStream(new GZIPOutputStream(out))
<       oos.writeObject(bcast)
<       oos.close()
---
>       val oos = new ObjectOutputStream(codec.compressedOutputStream(out))
>       Utils.tryWithSafeFinally {
>         oos.writeObject(bcast)
>       } {
>         oos.close()
>       }
845c939
<   def deserializeMapStatuses(bytes: Array[Byte]): Array[MapStatus] = {
---
>   def deserializeMapStatuses(bytes: Array[Byte], conf: SparkConf): Array[MapStatus] = {
849c943,947
<     val objIn = new ObjectInputStream(new GZIPInputStream(
---
>     val codec = CompressionCodec.createCodec(conf, "zstd")
>     // The ZStd codec is wrapped in a `BufferedInputStream`, which avoids the excessive
>     // overhead of JNI calls when decompressing a small amount of data for each element
>     // of `MapStatuses`
>     val objIn = new ObjectInputStream(codec.compressedInputStream(
886c984
<    * @param useOldFetchProtocol Whether to use the old shuffle fetch protocol.
---
>    * @param mapIndex When specified, only shuffle blocks from this mapper will be processed.
896c994
<       useOldFetchProtocol: Boolean): Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])] = {
---
>       mapIndex: Option[Int] = None): Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])] = {
899c997,998
<     for ((status, mapIndex) <- statuses.iterator.zipWithIndex) {
---
>     val iter = statuses.iterator.zipWithIndex
>     for ((status, mapIndex) <- mapIndex.map(index => iter.filter(_._2 == index)).getOrElse(iter)) {
908,916c1007,1008
<         if (useOldFetchProtocol) {
<           // While we use the old shuffle fetch protocol, we use mapIndex as mapId in the
<           // ShuffleBlockId.
<           splitsByAddress.getOrElseUpdate(status.location, ListBuffer()) +=
<             ((ShuffleBlockId(shuffleId, mapIndex, part), size, mapIndex))
<         } else {
<           splitsByAddress.getOrElseUpdate(status.location, ListBuffer()) +=
<             ((ShuffleBlockId(shuffleId, status.mapTaskId, part), size, mapIndex))
<         }
---
>         splitsByAddress.getOrElseUpdate(status.location, ListBuffer()) +=
>           ((ShuffleBlockId(shuffleId, status.mapId, part), size, mapIndex))
920a1013
>
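
Taken together, the MapOutputTracker hunks replace GZIP with Spark's configurable Zstd codec when shipping serialized MapStatuses and use the Commons IO ByteArrayOutputStream to avoid buffer reallocation. A round-trip sketch under those assumptions; it serializes a plain String stand-in instead of real MapStatus objects, and CompressionCodec is private[spark], so this only compiles inside that package:

  import java.io.{ByteArrayInputStream, ObjectInputStream, ObjectOutputStream}
  import org.apache.commons.io.output.{ByteArrayOutputStream => ApacheByteArrayOutputStream}
  import org.apache.spark.SparkConf
  import org.apache.spark.io.CompressionCodec

  val conf = new SparkConf()
  val codec = CompressionCodec.createCodec(conf, "zstd")

  // Serialize: object stream layered over the codec's compressed stream,
  // mirroring the structure in the diff.
  val out = new ApacheByteArrayOutputStream()
  val objOut = new ObjectOutputStream(codec.compressedOutputStream(out))
  objOut.writeObject("stand-in for an Array[MapStatus]")
  objOut.close()
  val bytes = out.toByteArray

  // Deserialize: the codec undoes the compression transparently.
  val objIn = new ObjectInputStream(
    codec.compressedInputStream(new ByteArrayInputStream(bytes)))
  val restored = objIn.readObject().asInstanceOf[String]
  objIn.close()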
diff ./SparkConf.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/SparkConf.scala
24a25
>
25a27
>
620c622,624
<     DeprecatedConfig("spark.yarn.services", "3.0.0", "Feature no longer available.")
---
>     DeprecatedConfig("spark.yarn.services", "3.0.0", "Feature no longer available."),
>     DeprecatedConfig("spark.executor.plugins", "3.0.0",
>       "Feature replaced with new plugin API. See Monitoring documentation.")
diff ./SparkContext.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/SparkContext.scala
50a51
> import org.apache.spark.internal.plugin.PluginContainer
60a62,63
> import org.apache.spark.shuffle.ShuffleDataIOUtils
> import org.apache.spark.shuffle.api.ShuffleDriverComponents
219a223,224
>   private var _shuffleDriverComponents: ShuffleDriverComponents = _
>   private var _plugins: Option[PluginContainer] = None
321a327,328
>   private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents
>
526a534,538
>     _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(config).driver()
>     _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) =>
>       _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v)
>     }
>
531a544,546
>     // Initialize any plugins before the task scheduler is initialized.
>     _plugins = PluginContainer(this)
>
561c576
<     _env.metricsSystem.start()
---
>     _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED))
579c594
<       Some(new ContextCleaner(this))
---
>       Some(new ContextCleaner(this, _shuffleDriverComponents))
613a629
>     _plugins.foreach(_.registerMetrics(applicationId))
1969a1986,1988
>       _plugins.foreach(_.shutdown())
>     }
>     Utils.tryLogNonFatalError {
1977a1997,2001
>     if (_shuffleDriverComponents != null) {
>       Utils.tryLogNonFatalError {
>         _shuffleDriverComponents.cleanupApplication()
>       }
>     }
2778c2802,2805
<         val resourceNumSlots = execAmount / taskReq.amount
---
>         // If the configured amount per task was < 1.0, a task is subdividing
>         // executor resources. If the amount per task was > 1.0, the task wants
>         // multiple executor resources.
>         val resourceNumSlots = Math.floor(execAmount * taskReq.numParts / taskReq.amount).toInt
2788c2815,2821
<         if (taskReq.amount * numSlots < execAmount) {
---
>         if ((numSlots * taskReq.amount / taskReq.numParts) < execAmount) {
>           val taskReqStr = if (taskReq.numParts > 1) {
>             s"${taskReq.amount}/${taskReq.numParts}"
>           } else {
>             s"${taskReq.amount}"
>           }
>           val resourceNumSlots = Math.floor(execAmount * taskReq.numParts / taskReq.amount).toInt
2790,2792c2823,2827
<             s"(exec = ${execAmount}, task = ${taskReq.amount}) will result in wasted " +
<             s"resources due to resource ${limitingResourceName} limiting the number of " +
<             s"runnable tasks per executor to: ${numSlots}. Please adjust your configuration."
---
>             s"(exec = ${execAmount}, task = ${taskReqStr}, " +
>             s"runnable tasks = ${resourceNumSlots}) will " +
>             s"result in wasted resources due to resource ${limitingResourceName} limiting the " +
>             s"number of runnable tasks per executor to: ${numSlots}. Please adjust " +
>             s"your configuration."
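
The new slot computation accounts for fractional per-task resource requests: an amount below 1.0 is stored as amount/numParts (0.25 of a GPU becomes amount = 1, numParts = 4), so one executor resource address can back several tasks. A worked example of the formula from the diff, with made-up numbers:

  // Hypothetical executor with 4 GPU addresses; each task asks for 0.25 GPU,
  // represented internally as amount = 1, numParts = 4.
  val execAmount = 4L
  val taskAmount = 1L // numerator of the per-task request
  val numParts = 4    // denominator: how many tasks share one address
  // Old formula: execAmount / taskAmount = 4 slots, ignoring sharing.
  // New formula from the diff:
  val resourceNumSlots = Math.floor(execAmount * numParts / taskAmount.toDouble).toInt
  assert(resourceNumSlots == 16) // four tasks per GPU address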
diff ./SparkEnv.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/SparkEnv.scala
24a25
> import scala.collection.concurrent
197a199
>       bindAddress: String,
205c207
<       hostname,
---
>       bindAddress,
215a218,228
>   private[spark] def createExecutorEnv(
>       conf: SparkConf,
>       executorId: String,
>       hostname: String,
>       numCores: Int,
>       ioEncryptionKey: Option[Array[Byte]],
>       isLocal: Boolean): SparkEnv = {
>     createExecutorEnv(conf, executorId, hostname,
>       hostname, numCores, ioEncryptionKey, isLocal)
>   }
>
342,354c355,374
<     val blockManagerMaster = new BlockManagerMaster(registerOrLookupEndpoint(
<       BlockManagerMaster.DRIVER_ENDPOINT_NAME,
<       new BlockManagerMasterEndpoint(
<         rpcEnv,
<         isLocal,
<         conf,
<         listenerBus,
<         if (conf.get(config.SHUFFLE_SERVICE_FETCH_RDD_ENABLED)) {
<           externalShuffleClient
<         } else {
<           None
<         })),
<       conf, isDriver)
---
>     // Mapping from block manager id to the block manager's information.
>     val blockManagerInfo = new concurrent.TrieMap[BlockManagerId, BlockManagerInfo]()
>     val blockManagerMaster = new BlockManagerMaster(
>       registerOrLookupEndpoint(
>         BlockManagerMaster.DRIVER_ENDPOINT_NAME,
>         new BlockManagerMasterEndpoint(
>           rpcEnv,
>           isLocal,
>           conf,
>           listenerBus,
>           if (conf.get(config.SHUFFLE_SERVICE_FETCH_RDD_ENABLED)) {
>             externalShuffleClient
>           } else {
>             None
>           }, blockManagerInfo)),
>       registerOrLookupEndpoint(
>         BlockManagerMaster.DRIVER_HEARTBEAT_ENDPOINT_NAME,
>         new BlockManagerMasterHeartbeatEndpoint(rpcEnv, isLocal, blockManagerInfo)),
>       conf,
>       isDriver)
386c406
<     ms.start()
---
>     ms.start(conf.get(METRICS_STATIC_SOURCES_ENABLED))
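
The SparkEnv hunk hoists the block-manager bookkeeping map out of the endpoint so that the master endpoint and the new heartbeat endpoint can share it; scala.collection.concurrent.TrieMap provides lock-free, thread-safe access. A toy version of the sharing pattern, with placeholder types standing in for BlockManagerId and BlockManagerInfo:

  import scala.collection.concurrent

  case class Id(executorId: String)  // placeholder for BlockManagerId
  case class Info(lastSeenMs: Long)  // placeholder for BlockManagerInfo

  // One shared, thread-safe map...
  val blockManagerInfo = new concurrent.TrieMap[Id, Info]()

  // ...read and written by two independent endpoints without extra locking.
  class RegistrationEndpoint(info: concurrent.TrieMap[Id, Info]) {
    def register(id: Id): Unit = info(id) = Info(System.currentTimeMillis())
  }
  class HeartbeatEndpoint(info: concurrent.TrieMap[Id, Info]) {
    def heartbeat(id: Id): Boolean = info.contains(id)
  }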
diff ./TaskContext.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/TaskContext.scala
187a188,195
>   /**
>    * (java-specific) Resources allocated to the task. The key is the resource name and the value
>    * is information about the resource. Please refer to
>    * [[org.apache.spark.resource.ResourceInformation]] for specifics.
>    */
>   @Evolving
>   def resourcesJMap(): java.util.Map[String, ResourceInformation]
>
diff ./TaskContextImpl.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/TaskContextImpl.scala
22a23
> import scala.collection.JavaConverters._
103a105,108
>   override def resourcesJMap(): java.util.Map[String, ResourceInformation] = {
>     resources.asJava
>   }
>
Only in /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark: TaskOutputFileAlreadyExistException.scala
diff ./TestUtils.scala /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/TestUtils.scala
27c27
< import java.util.{Arrays, EnumSet, Properties}
---
> import java.util.{Arrays, EnumSet, Locale, Properties}
29c29
< import java.util.jar.{JarEntry, JarOutputStream}
---
> import java.util.jar.{JarEntry, JarOutputStream, Manifest}
100c100,114
<   def createJar(files: Seq[File], jarFile: File, directoryPrefix: Option[String] = None): URL = {
---
>   def createJar(
>       files: Seq[File],
>       jarFile: File,
>       directoryPrefix: Option[String] = None,
>       mainClass: Option[String] = None): URL = {
>     val manifest = mainClass match {
>       case Some(mc) =>
>         val m = new Manifest()
>         m.getMainAttributes.putValue("Manifest-Version", "1.0")
>         m.getMainAttributes.putValue("Main-Class", mc)
>         m
>       case None =>
>         new Manifest()
>     }
>
102c116
<     val jarStream = new JarOutputStream(jarFileStream, new java.util.jar.Manifest())
---
>     val jarStream = new JarOutputStream(jarFileStream, manifest)
203c217,225
<   def assertExceptionMsg(exception: Throwable, msg: String): Unit = {
---
>   def assertExceptionMsg(exception: Throwable, msg: String, ignoreCase: Boolean = false): Unit = {
>     def contain(msg1: String, msg2: String): Boolean = {
>       if (ignoreCase) {
>         msg1.toLowerCase(Locale.ROOT).contains(msg2.toLowerCase(Locale.ROOT))
>       } else {
>         msg1.contains(msg2)
>       }
>     }
>
205c227
<     var contains = e.getMessage.contains(msg)
---
>     var contains = contain(e.getMessage, msg)
208c230
<     contains = e.getMessage.contains(msg)
---
>     contains = contain(e.getMessage, msg)
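
The TestUtils changes let a test build a runnable jar by filling in Main-Class in the manifest, and make assertExceptionMsg optionally case-insensitive. A usage sketch; the paths and class name are made up, and TestUtils is private[spark], so this runs from test code inside that package:

  import java.io.File
  import org.apache.spark.TestUtils

  // Hypothetical inputs: a compiled class file and a target jar path.
  val classFile = new File("/tmp/Hello.class")
  val jarFile = new File("/tmp/hello.jar")

  // With mainClass set, the manifest gains "Main-Class: Hello",
  // so `java -jar /tmp/hello.jar` can launch the jar directly.
  val url = TestUtils.createJar(Seq(classFile), jarFile, mainClass = Some("Hello"))

  // The new ignoreCase flag makes message matching case-insensitive.
  val e = new RuntimeException("File Already Exists")
  TestUtils.assertExceptionMsg(e, "already exists", ignoreCase = true)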
Only in .: annotation
Common subdirectories: ./api and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/api
Common subdirectories: ./broadcast and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/broadcast
Common subdirectories: ./deploy and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/deploy
Common subdirectories: ./executor and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/executor
Common subdirectories: ./input and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/input
Common subdirectories: ./internal and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/internal
Common subdirectories: ./io and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/io
Common subdirectories: ./launcher and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/launcher
Common subdirectories: ./mapred and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/mapred
Common subdirectories: ./memory and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/memory
Common subdirectories: ./metrics and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/metrics
Common subdirectories: ./network and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/network
Common subdirectories: ./partial and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/partial
Common subdirectories: ./rdd and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/rdd
Common subdirectories: ./resource and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/resource
Common subdirectories: ./rpc and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/rpc
Common subdirectories: ./scheduler and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/scheduler
Common subdirectories: ./security and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/security
Common subdirectories: ./serializer and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/serializer
Common subdirectories: ./shuffle and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/shuffle
Common subdirectories: ./status and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/status
Common subdirectories: ./storage and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/storage
Common subdirectories: ./ui and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/ui
Common subdirectories: ./util and /Users/bimde/Downloads/spark-f280c6aa54d80251da66ab370d32a7d93b01f225/core/src/main/scala/org/apache/spark/util