aboutsummaryrefslogtreecommitdiff
path: root/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml
blob: 77a644bfa508a526f4e7ee39414bf05d24fb7581 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
# Parameterless action; per its description it runs the Apache Bigtop smoke test.
smoke-test:
    description: Run an Apache Bigtop smoke test.
# MapReduce benchmark action; all params have defaults, so it can run with no input.
mrbench:
    description: MapReduce benchmark for small jobs
    params:
        basedir:
            description: DFS working directory
            type: string
            default: "/benchmarks/MRBench"
        numruns:
            description: Number of times to run the job
            type: integer
            default: 1
        maps:
            description: Number of maps for each run
            type: integer
            default: 2
        reduces:
            description: Number of reduces for each run
            type: integer
            default: 1
        inputlines:
            description: Number of input lines to generate
            type: integer
            default: 1
        inputtype:
            description: 'Type of input to generate, one of [ascending, descending, random]'
            type: string
            default: "ascending"
            # Restricts inputtype to the values the benchmark accepts.
            enum: [ascending, descending, random]
# NameNode load-test action; indentation normalized to the 4-space-per-level
# convention used by the other actions in this file (was 8 spaces per level here).
nnbench:
    description: Load test the NameNode hardware and configuration
    params:
        maps:
            description: Number of map jobs
            type: integer
            default: 12
        reduces:
            description: Number of reduces
            type: integer
            default: 6
        blocksize:
            description: Block size
            type: integer
            default: 1
        bytes:
            description: Bytes to write
            type: integer
            default: 0
        numfiles:
            description: Number of files
            type: integer
            default: 0
        repfactor:
            description: Replication factor per file
            type: integer
            default: 3
        basedir:
            description: DFS working directory
            type: string
            default: "/benchmarks/NNBench"
# DFS I/O benchmark action; indentation normalized to the 4-space-per-level
# convention used by the other actions in this file (was 8 spaces per level here).
testdfsio:
    description: DFS IO Testing
    params:
        mode:
            description: Read or write IO test
            type: string
            default: "write"
            # Restricts mode to the two supported test directions.
            enum: [read, write]
        numfiles:
            description: Number of files
            type: integer
            default: 10
        filesize:
            description: Filesize in MB
            type: integer
            default: 1000
        resfile:
            description: Results file name
            type: string
            default: "/tmp/TestDFSIO_results.log"
        buffersize:
            description: Buffer size in bytes
            type: integer
            default: 1000000
# Teragen data-generation action.
teragen:
    description: Generate data with teragen
    params:
        # 10,000,000 rows x 100 bytes = ~1 GB, matching the description.
        size:
            description: The number of 100 byte rows, default to 1GB of data to generate
            type: integer
            default: 10000000
        indir:
            description: HDFS directory where generated data is stored
            type: string
            default: 'tera_demo_in'
# Combined teragen + terasort action. NOTE(review): comments are placed only
# between params, never inside the folded block scalar below, where they would
# become part of the description text.
terasort:
    description: Runs teragen to generate sample data, and then runs terasort to sort that data
    params:
        indir:
            description: HDFS directory where generated data is stored
            type: string
            default: 'tera_demo_in'
        outdir:
            description: HDFS directory where sorted data is stored
            type: string
            default: 'tera_demo_out'
        # 10,000,000 rows x 100 bytes = ~1 GB, matching the description.
        size:
            description: The number of 100 byte rows, default to 1GB of data to generate and sort
            type: integer
            default: 10000000
        maps:
            description: The default number of map tasks per job. 1-20
            type: integer
            default: 1
        reduces:
            description: The default number of reduce tasks per job. Typically set to 99% of the cluster's reduce capacity, so that if a node fails the reduces can still be executed in a single wave. Try 1-20
            type: integer
            default: 1
        numtasks:
            description: How many tasks to run per jvm. If set to -1, there is no limit.
            type: integer
            default: 1
        # Folded (>) scalar: the lines below join into one description string.
        compression:
            description: >
                        Enable or Disable mapred output (intermediate) compression.
                        LocalDefault will run with your current local hadoop configuration.
                        Default means default hadoop deflate codec.
                        One of: Gzip, BZip2, Snappy, Lzo, Default, Disable, LocalDefault
                        These are all case sensitive.
            type: string
            default: "LocalDefault"
            enum: [Gzip, BZip2, Snappy, Lzo, Default, Disable, LocalDefault]