blob: 8298da696cbfbcac82143b73dec223388912a14a (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
|
# Maintainer: William Tang <galaxyking0419@gmail.com>
# Contributor: Lukas Zimmermann ("lukaszimmermann") <luk [dot] zim91 [at] gmail.com>
# Contributor: François Garillot ("huitseeker") <francois [at] garillot.net>
# Contributor: Christian Krause ("wookietreiber") <kizkizzbangbang@gmail.com>
# Contributor: Emanuel Fontelles ("emanuelfontelles") <emanuelfontelles@hotmail.com>
pkgname=apache-spark
pkgver=3.5.1
pkgrel=1
pkgdesc="A unified analytics engine for large-scale data processing"
# Pure JVM distribution: no compiled machine code, so 'any'.
arch=('any')
# Upstream serves the site over TLS; use https.
url="https://spark.apache.org"
license=('APACHE')
# inetutils provides hostname(1), used by the Spark launcher scripts;
# Spark 3.5.x supports Java runtimes up to 17.
depends=('inetutils' 'java-runtime-headless<=17')
source=("https://dlcdn.apache.org/spark/spark-$pkgver/spark-$pkgver-bin-hadoop3.tgz"
        'apache-spark.sh'
        'apache-spark-master.service'
        'apache-spark-worker@.service')
sha256sums=('5df15f8027067c060fe47ebd351a1431a61dbecc9c28b8dd29e2c6e1935c23eb'
            '0cc82baad4d878d4e2bc5864a00b99d38f2906781ea47ee6282546788e797049'
            'de54c025ca8ce34a7b4fd95ec7b8d5dec44582787a0bd8da09232f26e2182c9a'
            '47e6c154daecf7631ac9a33fe53a76888070c823c4381fcbde8d98377e586505')
install=apache-spark.install
# Clean the unpacked Spark distribution before packaging.
# Runs in $srcdir; the tarball extracts to spark-$pkgver-bin-hadoop3.
prepare() {
    cd "spark-$pkgver-bin-hadoop3"

    # Windows batch launchers are useless on Arch; drop them.
    # -f keeps the build going should an upstream release ship none.
    rm -f bin/*.cmd
}
# Assemble the installed file tree under $pkgdir.
# Runs in $srcdir; apache-spark.sh and the systemd units come from
# source=(), the Spark tree from the extracted tarball.
# All expansions are quoted so paths containing whitespace survive.
package() {
    # Create the directory skeleton first so plain cp/mv suffice below.
    mkdir -p "$pkgdir"/{etc/profile.d,opt,usr/lib/systemd/system}

    # Profile script (sets up the shell environment for Spark).
    cp "$pkgname.sh" "$pkgdir/etc/profile.d/"

    # systemd units for the standalone master and the worker template.
    cp "$pkgname-master.service" "$pkgname-worker@.service" \
        "$pkgdir/usr/lib/systemd/system/"

    # The program tree itself; mv avoids a second full copy of ~300 MB.
    mv "spark-$pkgver-bin-hadoop3" "$pkgdir/opt/$pkgname"
}
|