I am trying to convert a nested list into a DataFrame by following the answer in this link:
my_data = [['apple', 'ball', 'ballon'], ['cat', 'camel', 'james'], ['none', 'focus', 'cake']]
from pyspark.sql import Row
R = Row('ID', 'words')
spark.createDataFrame([R(i, x) for i, x in enumerate(my_data)]).show()
But I am getting this error:
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-147-780a8d7196df> in <module>()
----> 5 spark.createDataFrame([R(i, x) for i, x in enumerate(my_data)]).show()
F:\spark\spark\python\pyspark\sql\session.py in createDataFrame(self, data, schema, samplingRatio, verifySchema)
--> 689 rdd, schema = self._createFromLocal(map(prepare, data), schema)
F:\spark\spark\python\pyspark\sql\session.py in _createFromLocal(self, data, schema)
--> 424 return self._sc.parallelize(data), schema
F:\spark\spark\python\pyspark\context.py in parallelize(self, c, numSlices)
--> 484 jrdd = self._serialize_to_jvm(c, numSlices, serializer)
F:\spark\spark\python\pyspark\context.py in _serialize_to_jvm(self, data, parallelism, serializer)
--> 493 tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
~\Anaconda3\lib\tempfile.py in NamedTemporaryFile(mode, buffering, encoding, newline, suffix, prefix, dir, delete)
547 flags |= _os.O_TEMPORARY
548
--> 549 (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
550 try:
551 file = _io.open(fd, mode, buffering=buffering,
~\Anaconda3\lib\tempfile.py in _mkstemp_inner(dir, pre, suf, flags, output_type)
258 file = _os.path.join(dir, pre + name + suf)
259 try:
--> 260 fd = _os.open(file, flags, 0o600)
261 except FileExistsError:
262 continue # try again
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\*****\\AppData\\Local\\Temp\\spark-e340269d-a29e-4b95-90d3-c424a04fcb0a\\pyspark-f7fce557-e11b-47c9-b7a5-81e72a360b36\\tmp7n0s97t2'
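For reference, here is a fully self-contained version of the same snippet, which I would expect to go through the same `createDataFrame` / `parallelize` call path shown in the traceback. The SparkSession setup is only a sketch; the master URL and app name are placeholders and not part of my original code (in my notebook, `spark` already exists).

from pyspark.sql import Row, SparkSession

# Placeholder session setup; in the original notebook `spark` is already defined.
spark = SparkSession.builder.master("local[*]").appName("nested-list-to-df").getOrCreate()

my_data = [['apple', 'ball', 'ballon'], ['cat', 'camel', 'james'], ['none', 'focus', 'cake']]

# Row class with an integer ID and the inner list as the 'words' column.
R = Row('ID', 'words')
spark.createDataFrame([R(i, x) for i, x in enumerate(my_data)]).show()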